revlog: stop calling `basetext` `rawtext` in _revisiondata...

Author: marmoute
Changeset: r43056:2eec53a9 (branch: default)

@@ -1,2659 +1,2661 @@
# revlog.py - storage back-end for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Storage back-end for Mercurial.

This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""

from __future__ import absolute_import

import collections
import contextlib
import errno
import io
import os
import struct
import zlib

# import stuff from node for others to import from revlog
from .node import (
    bin,
    hex,
    nullhex,
    nullid,
    nullrev,
    short,
    wdirfilenodeids,
    wdirhex,
    wdirid,
    wdirrev,
)
from .i18n import _
from .revlogutils.constants import (
    FLAG_GENERALDELTA,
    FLAG_INLINE_DATA,
    REVLOGV0,
    REVLOGV1,
    REVLOGV1_FLAGS,
    REVLOGV2,
    REVLOGV2_FLAGS,
    REVLOG_DEFAULT_FLAGS,
    REVLOG_DEFAULT_FORMAT,
    REVLOG_DEFAULT_VERSION,
)
from .revlogutils.flagutil import (
    REVIDX_DEFAULT_FLAGS,
    REVIDX_ELLIPSIS,
    REVIDX_EXTSTORED,
    REVIDX_FLAGS_ORDER,
    REVIDX_ISCENSORED,
    REVIDX_RAWTEXT_CHANGING_FLAGS,
)
from .thirdparty import (
    attr,
)
from . import (
    ancestor,
    dagop,
    error,
    mdiff,
    policy,
    pycompat,
    repository,
    templatefilters,
    util,
)
from .revlogutils import (
    deltas as deltautil,
    flagutil,
)
from .utils import (
    interfaceutil,
    storageutil,
    stringutil,
)

# blanked usage of all the names to prevent pyflakes constraints
# We need these names available in the module for extensions.
REVLOGV0
REVLOGV1
REVLOGV2
FLAG_INLINE_DATA
FLAG_GENERALDELTA
REVLOG_DEFAULT_FLAGS
REVLOG_DEFAULT_FORMAT
REVLOG_DEFAULT_VERSION
REVLOGV1_FLAGS
REVLOGV2_FLAGS
REVIDX_ISCENSORED
REVIDX_ELLIPSIS
REVIDX_EXTSTORED
REVIDX_DEFAULT_FLAGS
REVIDX_FLAGS_ORDER
REVIDX_RAWTEXT_CHANGING_FLAGS

parsers = policy.importmod(r'parsers')
rustancestor = policy.importrust(r'ancestor')
rustdagop = policy.importrust(r'dagop')

# Aliased for performance.
_zlibdecompress = zlib.decompress

# max size of revlog with inline data
_maxinline = 131072
_chunksize = 1048576

# Flag processors for REVIDX_ELLIPSIS.
def ellipsisreadprocessor(rl, text):
    return text, False

def ellipsiswriteprocessor(rl, text):
    return text, False

def ellipsisrawprocessor(rl, text):
    return False

ellipsisprocessor = (
    ellipsisreadprocessor,
    ellipsiswriteprocessor,
    ellipsisrawprocessor,
)

def getoffset(q):
    return int(q >> 16)

def gettype(q):
    return int(q & 0xFFFF)

def offset_type(offset, type):
    if (type & ~flagutil.REVIDX_KNOWN_FLAGS) != 0:
        raise ValueError('unknown revlog index flags')
    return int(int(offset) << 16 | type)

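# Illustrative example (not part of the original file): the helpers above pack
# a 48-bit data offset and a 16-bit flag field into the single 64-bit integer
# stored as the first entry of an index tuple. For a plain revision (no flags)
# at a hypothetical offset of 0x1234 the round-trip would be:
#
#   packed = offset_type(0x1234, 0)   # == 0x1234 << 16
#   getoffset(packed)                 # -> 0x1234
#   gettype(packed)                   # -> 0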
@attr.s(slots=True, frozen=True)
class _revisioninfo(object):
    """Information about a revision that allows building its fulltext
    node: expected hash of the revision
    p1, p2: parent revs of the revision
    btext: built text cache consisting of a one-element list
    cachedelta: (baserev, uncompressed_delta) or None
    flags: flags associated to the revision storage

    One of btext[0] or cachedelta must be set.
    """
    node = attr.ib()
    p1 = attr.ib()
    p2 = attr.ib()
    btext = attr.ib()
    textlen = attr.ib()
    cachedelta = attr.ib()
    flags = attr.ib()

@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class revlogrevisiondelta(object):
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    linknode = attr.ib(default=None)

@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class revlogproblem(object):
    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)

# index v0:
#  4 bytes: offset
#  4 bytes: compressed length
#  4 bytes: base rev
#  4 bytes: link rev
# 20 bytes: parent 1 nodeid
# 20 bytes: parent 2 nodeid
# 20 bytes: nodeid
indexformatv0 = struct.Struct(">4l20s20s20s")
indexformatv0_pack = indexformatv0.pack
indexformatv0_unpack = indexformatv0.unpack

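# Illustrative sketch (not part of the original file): a v0 index entry is a
# fixed 76-byte record (four 4-byte integers followed by three 20-byte nodes).
# Unpacking the first entry of a hypothetical raw index buffer `data` by hand
# would look like:
#
#   offset, clen, baserev, linkrev, p1, p2, node = indexformatv0_unpack(
#       data[0:indexformatv0.size])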
class revlogoldindex(list):
    def __getitem__(self, i):
        if i == -1:
            return (0, 0, 0, -1, -1, -1, -1, nullid)
        return list.__getitem__(self, i)

class revlogoldio(object):
    def __init__(self):
        self.size = indexformatv0.size

    def parseindex(self, data, inline):
        s = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = off = 0
        l = len(data)
        while off + s <= l:
            cur = data[off:off + s]
            off += s
            e = indexformatv0_unpack(cur)
            # transform to revlogv1 format
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        return revlogoldindex(index), nodemap, None

    def packentry(self, entry, node, version, rev):
        if gettype(entry[0]):
            raise error.RevlogError(_('index entry flags need revlog '
                                      'version 1'))
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return indexformatv0_pack(*e2)

# index ng:
#  6 bytes: offset
#  2 bytes: flags
#  4 bytes: compressed length
#  4 bytes: uncompressed length
#  4 bytes: base rev
#  4 bytes: link rev
#  4 bytes: parent 1 rev
#  4 bytes: parent 2 rev
# 32 bytes: nodeid
indexformatng = struct.Struct(">Qiiiiii20s12x")
indexformatng_pack = indexformatng.pack
versionformat = struct.Struct(">I")
versionformat_pack = versionformat.pack
versionformat_unpack = versionformat.unpack

# corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
# signed integer)
_maxentrysize = 0x7fffffff

class revlogio(object):
    def __init__(self):
        self.size = indexformatng.size

    def parseindex(self, data, inline):
        # call the C implementation to parse the index data
        index, cache = parsers.parse_index2(data, inline)
        return index, getattr(index, 'nodemap', None), cache

    def packentry(self, entry, node, version, rev):
        p = indexformatng_pack(*entry)
        if rev == 0:
            p = versionformat_pack(version) + p[4:]
        return p

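# Illustrative sketch (not part of the original file): each v1 ("ng") index
# entry is a 64-byte record whose first field packs offset and flags as built
# by offset_type() above. Decoding one hypothetical record by hand would be:
#
#   (offset_flags, clen, ulen, baserev, linkrev,
#    p1rev, p2rev, node) = indexformatng.unpack(data[0:indexformatng.size])
#   offset, flags = getoffset(offset_flags), gettype(offset_flags)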
class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.

    If checkambig, indexfile is opened with checkambig=True at
    writing, to avoid file stat ambiguity.

    If mmaplargeindex is True, and an mmapindexthreshold is set, the
    index will be mmapped rather than read if it is larger than the
    configured threshold.

    If censorable is True, the revlog can have censored revisions.

    If `upperboundcomp` is not None, this is the expected maximal gain from
    compression for the data content.
    """
    def __init__(self, opener, indexfile, datafile=None, checkambig=False,
                 mmaplargeindex=False, censorable=False,
                 upperboundcomp=None):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.

        """
        self.upperboundcomp = upperboundcomp
        self.indexfile = indexfile
        self.datafile = datafile or (indexfile[:-2] + ".d")
        self.opener = opener
        # When True, indexfile is opened with checkambig=True at writing, to
        # avoid file stat ambiguity.
        self._checkambig = checkambig
        self._mmaplargeindex = mmaplargeindex
        self._censorable = censorable
        # 3-tuple of (node, rev, text) for a raw revision.
        self._revisioncache = None
        # Maps rev to chain base rev.
        self._chainbasecache = util.lrucachedict(100)
        # 2-tuple of (offset, data) of raw data from the revlog at an offset.
        self._chunkcache = (0, '')
        # How much data to read and cache into the raw revlog data cache.
        self._chunkcachesize = 65536
        self._maxchainlen = None
        self._deltabothparents = True
        self.index = []
        # Mapping of partial identifiers to full nodes.
        self._pcache = {}
        # Mapping of revision integer to full node.
        self._nodecache = {nullid: nullrev}
        self._nodepos = None
        self._compengine = 'zlib'
        self._compengineopts = {}
        self._maxdeltachainspan = -1
        self._withsparseread = False
        self._sparserevlog = False
        self._srdensitythreshold = 0.50
        self._srmingapsize = 262144

        # Make copy of flag processors so each revlog instance can support
        # custom flags.
        self._flagprocessors = dict(flagutil.flagprocessors)

        # 2-tuple of file handles being used for active writing.
        self._writinghandles = None

        self._loadindex()

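    # Illustrative usage sketch (not part of the original file): revlogs are
    # normally created by higher-level storage code, but constructing one only
    # needs an opener (a vfs) and an index file name. The path and file name
    # below are hypothetical:
    #
    #   from mercurial import vfs as vfsmod
    #   opener = vfsmod.vfs('/path/to/repo/.hg/store')
    #   rl = revlog(opener, 'data/somefile.txt.i')
    #   rl.tip()    # node id of the most recently added revision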
    def _loadindex(self):
        mmapindexthreshold = None
        opts = getattr(self.opener, 'options', {}) or {}

        if 'revlogv2' in opts:
            newversionflags = REVLOGV2 | FLAG_INLINE_DATA
        elif 'revlogv1' in opts:
            newversionflags = REVLOGV1 | FLAG_INLINE_DATA
            if 'generaldelta' in opts:
                newversionflags |= FLAG_GENERALDELTA
        elif getattr(self.opener, 'options', None) is not None:
            # If options provided but no 'revlog*' found, the repository
            # would have no 'requires' file in it, which means we have to
            # stick to the old format.
            newversionflags = REVLOGV0
        else:
            newversionflags = REVLOG_DEFAULT_VERSION

        if 'chunkcachesize' in opts:
            self._chunkcachesize = opts['chunkcachesize']
        if 'maxchainlen' in opts:
            self._maxchainlen = opts['maxchainlen']
        if 'deltabothparents' in opts:
            self._deltabothparents = opts['deltabothparents']
        self._lazydelta = bool(opts.get('lazydelta', True))
        self._lazydeltabase = False
        if self._lazydelta:
            self._lazydeltabase = bool(opts.get('lazydeltabase', False))
        if 'compengine' in opts:
            self._compengine = opts['compengine']
        if 'zlib.level' in opts:
            self._compengineopts['zlib.level'] = opts['zlib.level']
        if 'zstd.level' in opts:
            self._compengineopts['zstd.level'] = opts['zstd.level']
        if 'maxdeltachainspan' in opts:
            self._maxdeltachainspan = opts['maxdeltachainspan']
        if self._mmaplargeindex and 'mmapindexthreshold' in opts:
            mmapindexthreshold = opts['mmapindexthreshold']
        self._sparserevlog = bool(opts.get('sparse-revlog', False))
        withsparseread = bool(opts.get('with-sparse-read', False))
        # sparse-revlog forces sparse-read
        self._withsparseread = self._sparserevlog or withsparseread
        if 'sparse-read-density-threshold' in opts:
            self._srdensitythreshold = opts['sparse-read-density-threshold']
        if 'sparse-read-min-gap-size' in opts:
            self._srmingapsize = opts['sparse-read-min-gap-size']
        if opts.get('enableellipsis'):
            self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor

        # revlog v0 doesn't have flag processors
        for flag, processor in opts.get(b'flagprocessors', {}).iteritems():
            flagutil.insertflagprocessor(flag, processor, self._flagprocessors)

        if self._chunkcachesize <= 0:
            raise error.RevlogError(_('revlog chunk cache size %r is not '
                                      'greater than 0') % self._chunkcachesize)
        elif self._chunkcachesize & (self._chunkcachesize - 1):
            raise error.RevlogError(_('revlog chunk cache size %r is not a '
                                      'power of 2') % self._chunkcachesize)

        indexdata = ''
        self._initempty = True
        try:
            with self._indexfp() as f:
                if (mmapindexthreshold is not None and
                    self.opener.fstat(f).st_size >= mmapindexthreshold):
                    # TODO: should .close() to release resources without
                    # relying on Python GC
                    indexdata = util.buffer(util.mmapread(f))
                else:
                    indexdata = f.read()
            if len(indexdata) > 0:
                versionflags = versionformat_unpack(indexdata[:4])[0]
                self._initempty = False
            else:
                versionflags = newversionflags
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

            versionflags = newversionflags

        self.version = versionflags

        flags = versionflags & ~0xFFFF
        fmt = versionflags & 0xFFFF
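        # Illustrative example (not part of the original file): the 4-byte
        # header splits into a format number and feature flags, e.g. for an
        # inline v1 revlog:
        #
        #   fmt == REVLOGV1                 (low 16 bits)
        #   flags & FLAG_INLINE_DATA != 0   (high 16 bits)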

        if fmt == REVLOGV0:
            if flags:
                raise error.RevlogError(_('unknown flags (%#04x) in version %d '
                                          'revlog %s') %
                                        (flags >> 16, fmt, self.indexfile))

            self._inline = False
            self._generaldelta = False

        elif fmt == REVLOGV1:
            if flags & ~REVLOGV1_FLAGS:
                raise error.RevlogError(_('unknown flags (%#04x) in version %d '
                                          'revlog %s') %
                                        (flags >> 16, fmt, self.indexfile))

            self._inline = versionflags & FLAG_INLINE_DATA
            self._generaldelta = versionflags & FLAG_GENERALDELTA

        elif fmt == REVLOGV2:
            if flags & ~REVLOGV2_FLAGS:
                raise error.RevlogError(_('unknown flags (%#04x) in version %d '
                                          'revlog %s') %
                                        (flags >> 16, fmt, self.indexfile))

            self._inline = versionflags & FLAG_INLINE_DATA
            # generaldelta implied by version 2 revlogs.
            self._generaldelta = True

        else:
            raise error.RevlogError(_('unknown version (%d) in revlog %s') %
                                    (fmt, self.indexfile))
        # sparse-revlog can't be on without general-delta (issue6056)
        if not self._generaldelta:
            self._sparserevlog = False

        self._storedeltachains = True

        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        try:
            d = self._io.parseindex(indexdata, self._inline)
        except (ValueError, IndexError):
            raise error.RevlogError(_("index %s is corrupted") %
                                    self.indexfile)
        self.index, nodemap, self._chunkcache = d
        if nodemap is not None:
            self.nodemap = self._nodecache = nodemap
        if not self._chunkcache:
            self._chunkclear()
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = {}
        # revlog header -> revlog compressor
        self._decompressors = {}

    @util.propertycache
    def _compressor(self):
        engine = util.compengines[self._compengine]
        return engine.revlogcompressor(self._compengineopts)

    def _indexfp(self, mode='r'):
        """file object for the revlog's index file"""
        args = {r'mode': mode}
        if mode != 'r':
            args[r'checkambig'] = self._checkambig
        if mode == 'w':
            args[r'atomictemp'] = True
        return self.opener(self.indexfile, **args)

    def _datafp(self, mode='r'):
        """file object for the revlog's data file"""
        return self.opener(self.datafile, mode=mode)

    @contextlib.contextmanager
    def _datareadfp(self, existingfp=None):
        """file object suitable to read data"""
        # Use explicit file handle, if given.
        if existingfp is not None:
            yield existingfp

        # Use a file handle being actively used for writes, if available.
        # There is some danger to doing this because reads will seek the
        # file. However, _writeentry() performs a SEEK_END before all writes,
        # so we should be safe.
        elif self._writinghandles:
            if self._inline:
                yield self._writinghandles[0]
            else:
                yield self._writinghandles[1]

        # Otherwise open a new file handle.
        else:
            if self._inline:
                func = self._indexfp
            else:
                func = self._datafp
            with func() as fp:
                yield fp

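    # Illustrative usage sketch (not part of the original file): because
    # _datareadfp is a context manager, callers read raw segments roughly like
    # this (offset and length are hypothetical):
    #
    #   with self._datareadfp() as fp:
    #       fp.seek(offset)
    #       data = fp.read(length)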
    def tip(self):
        return self.node(len(self.index) - 1)
    def __contains__(self, rev):
        return 0 <= rev < len(self)
    def __len__(self):
        return len(self.index)
    def __iter__(self):
        return iter(pycompat.xrange(len(self)))
    def revs(self, start=0, stop=None):
        """iterate over all rev in this revlog (from start to stop)"""
        return storageutil.iterrevs(len(self), start=start, stop=stop)

    @util.propertycache
    def nodemap(self):
        if self.index:
            # populate mapping down to the initial node
            node0 = self.index[0][7]  # get around changelog filtering
            self.rev(node0)
        return self._nodecache

    def hasnode(self, node):
        try:
            self.rev(node)
            return True
        except KeyError:
            return False

    def candelta(self, baserev, rev):
        """whether two revisions (baserev, rev) can be delta-ed or not"""
        # Disable delta if either rev requires a content-changing flag
        # processor (ex. LFS). This is because such flag processor can alter
        # the rawtext content that the delta will be based on, and two clients
        # could have a same revlog node with different flags (i.e. different
        # rawtext contents) and the delta could be incompatible.
        if ((self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS)
            or (self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS)):
            return False
        return True

    def clearcaches(self):
        self._revisioncache = None
        self._chainbasecache.clear()
        self._chunkcache = (0, '')
        self._pcache = {}

        try:
            # If we are using the native C version, you are in a fun case
            # where self.index, self.nodemap and self._nodecaches is the same
            # object.
            self._nodecache.clearcaches()
        except AttributeError:
            self._nodecache = {nullid: nullrev}
            self._nodepos = None

    def rev(self, node):
        try:
            return self._nodecache[node]
        except TypeError:
            raise
        except error.RevlogError:
            # parsers.c radix tree lookup failed
            if node == wdirid or node in wdirfilenodeids:
                raise error.WdirUnsupported
            raise error.LookupError(node, self.indexfile, _('no node'))
        except KeyError:
            # pure python cache lookup failed
            n = self._nodecache
            i = self.index
            p = self._nodepos
            if p is None:
                p = len(i) - 1
            else:
                assert p < len(i)
            for r in pycompat.xrange(p, -1, -1):
                v = i[r][7]
                n[v] = r
                if v == node:
                    self._nodepos = r - 1
                    return r
            if node == wdirid or node in wdirfilenodeids:
                raise error.WdirUnsupported
            raise error.LookupError(node, self.indexfile, _('no node'))

621
621
622 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
622 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
623 # are flags.
623 # are flags.
624 def start(self, rev):
624 def start(self, rev):
625 return int(self.index[rev][0] >> 16)
625 return int(self.index[rev][0] >> 16)
626
626
627 def flags(self, rev):
627 def flags(self, rev):
628 return self.index[rev][0] & 0xFFFF
628 return self.index[rev][0] & 0xFFFF
629
629
630 def length(self, rev):
630 def length(self, rev):
631 return self.index[rev][1]
631 return self.index[rev][1]
632
632
633 def rawsize(self, rev):
633 def rawsize(self, rev):
634 """return the length of the uncompressed text for a given revision"""
634 """return the length of the uncompressed text for a given revision"""
635 l = self.index[rev][2]
635 l = self.index[rev][2]
636 if l >= 0:
636 if l >= 0:
637 return l
637 return l
638
638
639 t = self.rawdata(rev)
639 t = self.rawdata(rev)
640 return len(t)
640 return len(t)
641
641
642 def size(self, rev):
642 def size(self, rev):
643 """length of non-raw text (processed by a "read" flag processor)"""
643 """length of non-raw text (processed by a "read" flag processor)"""
644 # fast path: if no "read" flag processor could change the content,
644 # fast path: if no "read" flag processor could change the content,
645 # size is rawsize. note: ELLIPSIS is known to not change the content.
645 # size is rawsize. note: ELLIPSIS is known to not change the content.
646 flags = self.flags(rev)
646 flags = self.flags(rev)
647 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
647 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
648 return self.rawsize(rev)
648 return self.rawsize(rev)
649
649
650 return len(self.revision(rev, raw=False))
650 return len(self.revision(rev, raw=False))
651
651
    def chainbase(self, rev):
        base = self._chainbasecache.get(rev)
        if base is not None:
            return base

        index = self.index
        iterrev = rev
        base = index[iterrev][3]
        while base != iterrev:
            iterrev = base
            base = index[iterrev][3]

        self._chainbasecache[rev] = base
        return base

    def linkrev(self, rev):
        return self.index[rev][4]

    def parentrevs(self, rev):
        try:
            entry = self.index[rev]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

        return entry[5], entry[6]

    # fast parentrevs(rev) where rev isn't filtered
    _uncheckedparentrevs = parentrevs

    def node(self, rev):
        try:
            return self.index[rev][7]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

692
692
693 def end(self, rev):
693 def end(self, rev):
694 return self.start(rev) + self.length(rev)
694 return self.start(rev) + self.length(rev)
695
695
696 def parents(self, node):
696 def parents(self, node):
697 i = self.index
697 i = self.index
698 d = i[self.rev(node)]
698 d = i[self.rev(node)]
699 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
699 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
700
700
701 def chainlen(self, rev):
701 def chainlen(self, rev):
702 return self._chaininfo(rev)[0]
702 return self._chaininfo(rev)[0]
703
703
704 def _chaininfo(self, rev):
704 def _chaininfo(self, rev):
705 chaininfocache = self._chaininfocache
705 chaininfocache = self._chaininfocache
706 if rev in chaininfocache:
706 if rev in chaininfocache:
707 return chaininfocache[rev]
707 return chaininfocache[rev]
708 index = self.index
708 index = self.index
709 generaldelta = self._generaldelta
709 generaldelta = self._generaldelta
710 iterrev = rev
710 iterrev = rev
711 e = index[iterrev]
711 e = index[iterrev]
712 clen = 0
712 clen = 0
713 compresseddeltalen = 0
713 compresseddeltalen = 0
714 while iterrev != e[3]:
714 while iterrev != e[3]:
715 clen += 1
715 clen += 1
716 compresseddeltalen += e[1]
716 compresseddeltalen += e[1]
717 if generaldelta:
717 if generaldelta:
718 iterrev = e[3]
718 iterrev = e[3]
719 else:
719 else:
720 iterrev -= 1
720 iterrev -= 1
721 if iterrev in chaininfocache:
721 if iterrev in chaininfocache:
722 t = chaininfocache[iterrev]
722 t = chaininfocache[iterrev]
723 clen += t[0]
723 clen += t[0]
724 compresseddeltalen += t[1]
724 compresseddeltalen += t[1]
725 break
725 break
726 e = index[iterrev]
726 e = index[iterrev]
727 else:
727 else:
728 # Add text length of base since decompressing that also takes
728 # Add text length of base since decompressing that also takes
729 # work. For cache hits the length is already included.
729 # work. For cache hits the length is already included.
730 compresseddeltalen += e[1]
730 compresseddeltalen += e[1]
731 r = (clen, compresseddeltalen)
731 r = (clen, compresseddeltalen)
732 chaininfocache[rev] = r
732 chaininfocache[rev] = r
733 return r
733 return r
734
734
    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
        """
        # Try C implementation.
        try:
            return self.index.deltachain(rev, stoprev, self._generaldelta)
        except AttributeError:
            pass

        chain = []

        # Alias to prevent attribute lookup in tight loop.
        index = self.index
        generaldelta = self._generaldelta

        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != stoprev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]

        if iterrev == stoprev:
            stopped = True
        else:
            chain.append(iterrev)
            stopped = False

        chain.reverse()
        return chain, stopped

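    # Illustrative example (not part of the original file): on a hypothetical
    # generaldelta revlog where revision 5 deltas against revision 4 and
    # revision 4 is a full snapshot (its base is itself),
    #
    #   self._deltachain(5)             # -> ([4, 5], False)
    #   self._deltachain(5, stoprev=4)  # -> ([5], True)
    #
    # the second call stops before reaching the chain base.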
    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse revision order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        # first, make sure start revisions aren't filtered
        revs = list(revs)
        checkrev = self.node
        for r in revs:
            checkrev(r)
        # and we're sure ancestors aren't filtered as well

        if rustancestor is not None:
            lazyancestors = rustancestor.LazyAncestors
            arg = self.index
        elif util.safehasattr(parsers, 'rustlazyancestors'):
            lazyancestors = ancestor.rustlazyancestors
            arg = self.index
        else:
            lazyancestors = ancestor.lazyancestors
            arg = self._uncheckedparentrevs
        return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)

    def descendants(self, revs):
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)

    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

          ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset(object):
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if not r in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = collections.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(miss) for miss in missing]

    def incrementalmissingrevs(self, common=None):
        """Return an object that can be used to incrementally compute the
        revision numbers of the ancestors of arbitrary sets that are not
        ancestors of common. This is an ancestor.incrementalmissingancestors
        object.

        'common' is a list of revision numbers. If common is not supplied, uses
        nullrev.
        """
        if common is None:
            common = [nullrev]

        if rustancestor is not None:
            return rustancestor.MissingAncestors(self.index, common)
        return ancestor.incrementalmissingancestors(self.parentrevs, common)

    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()

        inc = self.incrementalmissingrevs(common=common)
        return inc.missingancestors(heads)

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        inc = self.incrementalmissingrevs(common=common)
        return [self.node(r) for r in inc.missingancestors(heads)]

    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [nullid]  # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return ([self.node(r) for r in self], [nullid], list(self.heads()))
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
        # Start at the top and keep marking parents until we're done.
        nodestotag = set(heads)
985 # Remember where the top was so we can use it as a limit later.
985 # Remember where the top was so we can use it as a limit later.
986 highestrev = max([self.rev(n) for n in nodestotag])
986 highestrev = max([self.rev(n) for n in nodestotag])
987 while nodestotag:
987 while nodestotag:
988 # grab a node to tag
988 # grab a node to tag
989 n = nodestotag.pop()
989 n = nodestotag.pop()
990 # Never tag nullid
990 # Never tag nullid
991 if n == nullid:
991 if n == nullid:
992 continue
992 continue
993 # A node's revision number represents its place in a
993 # A node's revision number represents its place in a
994 # topologically sorted list of nodes.
994 # topologically sorted list of nodes.
995 r = self.rev(n)
995 r = self.rev(n)
996 if r >= lowestrev:
996 if r >= lowestrev:
997 if n not in ancestors:
997 if n not in ancestors:
998 # If we are possibly a descendant of one of the roots
998 # If we are possibly a descendant of one of the roots
999 # and we haven't already been marked as an ancestor
999 # and we haven't already been marked as an ancestor
1000 ancestors.add(n) # Mark as ancestor
1000 ancestors.add(n) # Mark as ancestor
1001 # Add non-nullid parents to list of nodes to tag.
1001 # Add non-nullid parents to list of nodes to tag.
1002 nodestotag.update([p for p in self.parents(n) if
1002 nodestotag.update([p for p in self.parents(n) if
1003 p != nullid])
1003 p != nullid])
1004 elif n in heads: # We've seen it before, is it a fake head?
1004 elif n in heads: # We've seen it before, is it a fake head?
1005 # So it is, real heads should not be the ancestors of
1005 # So it is, real heads should not be the ancestors of
1006 # any other heads.
1006 # any other heads.
1007 heads.pop(n)
1007 heads.pop(n)
1008 if not ancestors:
1008 if not ancestors:
1009 return nonodes
1009 return nonodes
1010 # Now that we have our set of ancestors, we want to remove any
1010 # Now that we have our set of ancestors, we want to remove any
1011 # roots that are not ancestors.
1011 # roots that are not ancestors.
1012
1012
1013 # If one of the roots was nullid, everything is included anyway.
1013 # If one of the roots was nullid, everything is included anyway.
1014 if lowestrev > nullrev:
1014 if lowestrev > nullrev:
1015 # But, since it wasn't, let's recompute the lowest rev to not
1015 # But, since it wasn't, let's recompute the lowest rev to not
1016 # include roots that aren't ancestors.
1016 # include roots that aren't ancestors.
1017
1017
1018 # Filter out roots that aren't ancestors of heads
1018 # Filter out roots that aren't ancestors of heads
1019 roots = [root for root in roots if root in ancestors]
1019 roots = [root for root in roots if root in ancestors]
1020 # Recompute the lowest revision
1020 # Recompute the lowest revision
1021 if roots:
1021 if roots:
1022 lowestrev = min([self.rev(root) for root in roots])
1022 lowestrev = min([self.rev(root) for root in roots])
1023 else:
1023 else:
1024 # No more roots? Return empty list
1024 # No more roots? Return empty list
1025 return nonodes
1025 return nonodes
1026 else:
1026 else:
1027 # We are descending from nullid, and don't need to care about
1027 # We are descending from nullid, and don't need to care about
1028 # any other roots.
1028 # any other roots.
1029 lowestrev = nullrev
1029 lowestrev = nullrev
1030 roots = [nullid]
1030 roots = [nullid]
1031 # Transform our roots list into a set.
1031 # Transform our roots list into a set.
1032 descendants = set(roots)
1032 descendants = set(roots)
1033 # Also, keep the original roots so we can filter out roots that aren't
1033 # Also, keep the original roots so we can filter out roots that aren't
1034 # 'real' roots (i.e. are descended from other roots).
1034 # 'real' roots (i.e. are descended from other roots).
1035 roots = descendants.copy()
1035 roots = descendants.copy()
1036 # Our topologically sorted list of output nodes.
1036 # Our topologically sorted list of output nodes.
1037 orderedout = []
1037 orderedout = []
1038 # Don't start at nullid since we don't want nullid in our output list,
1038 # Don't start at nullid since we don't want nullid in our output list,
1039 # and if nullid shows up in descendants, empty parents will look like
1039 # and if nullid shows up in descendants, empty parents will look like
1040 # they're descendants.
1040 # they're descendants.
1041 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1041 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1042 n = self.node(r)
1042 n = self.node(r)
1043 isdescendant = False
1043 isdescendant = False
1044 if lowestrev == nullrev: # Everybody is a descendant of nullid
1044 if lowestrev == nullrev: # Everybody is a descendant of nullid
1045 isdescendant = True
1045 isdescendant = True
1046 elif n in descendants:
1046 elif n in descendants:
1047 # n is already a descendant
1047 # n is already a descendant
1048 isdescendant = True
1048 isdescendant = True
1049 # This check only needs to be done here because all the roots
1049 # This check only needs to be done here because all the roots
1050 # will start being marked as descendants before the loop.
1050 # will start being marked as descendants before the loop.
1051 if n in roots:
1051 if n in roots:
1052 # If n was a root, check if it's a 'real' root.
1052 # If n was a root, check if it's a 'real' root.
1053 p = tuple(self.parents(n))
1053 p = tuple(self.parents(n))
1054 # If any of its parents are descendants, it's not a root.
1054 # If any of its parents are descendants, it's not a root.
1055 if (p[0] in descendants) or (p[1] in descendants):
1055 if (p[0] in descendants) or (p[1] in descendants):
1056 roots.remove(n)
1056 roots.remove(n)
1057 else:
1057 else:
1058 p = tuple(self.parents(n))
1058 p = tuple(self.parents(n))
1059 # A node is a descendant if either of its parents is a
1059 # A node is a descendant if either of its parents is a
1060 # descendant. (We seeded the descendants set with the roots
1060 # descendant. (We seeded the descendants set with the roots
1061 # up there, remember?)
1061 # up there, remember?)
1062 if (p[0] in descendants) or (p[1] in descendants):
1062 if (p[0] in descendants) or (p[1] in descendants):
1063 descendants.add(n)
1063 descendants.add(n)
1064 isdescendant = True
1064 isdescendant = True
1065 if isdescendant and ((ancestors is None) or (n in ancestors)):
1065 if isdescendant and ((ancestors is None) or (n in ancestors)):
1066 # Only include nodes that are both descendants and ancestors.
1066 # Only include nodes that are both descendants and ancestors.
1067 orderedout.append(n)
1067 orderedout.append(n)
1068 if (ancestors is not None) and (n in heads):
1068 if (ancestors is not None) and (n in heads):
1069 # We're trying to figure out which heads are reachable
1069 # We're trying to figure out which heads are reachable
1070 # from roots.
1070 # from roots.
1071 # Mark this head as having been reached
1071 # Mark this head as having been reached
1072 heads[n] = True
1072 heads[n] = True
1073 elif ancestors is None:
1073 elif ancestors is None:
1074 # Otherwise, we're trying to discover the heads.
1074 # Otherwise, we're trying to discover the heads.
1075 # Assume this is a head because if it isn't, the next step
1075 # Assume this is a head because if it isn't, the next step
1076 # will eventually remove it.
1076 # will eventually remove it.
1077 heads[n] = True
1077 heads[n] = True
1078 # But, obviously its parents aren't.
1078 # But, obviously its parents aren't.
1079 for p in self.parents(n):
1079 for p in self.parents(n):
1080 heads.pop(p, None)
1080 heads.pop(p, None)
1081 heads = [head for head, flag in heads.iteritems() if flag]
1081 heads = [head for head, flag in heads.iteritems() if flag]
1082 roots = list(roots)
1082 roots = list(roots)
1083 assert orderedout
1083 assert orderedout
1084 assert roots
1084 assert roots
1085 assert heads
1085 assert heads
1086 return (orderedout, roots, heads)
1086 return (orderedout, roots, heads)
1087
1087
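A hedged sketch of the nodesbetween() contract, assuming an opened revlog `rl` and two hypothetical node ids where `root_n` is an ancestor of `head_n`.

# Hypothetical sketch; `rl`, `root_n` and `head_n` are assumptions.
nodes, outroots, outheads = rl.nodesbetween(roots=[root_n], heads=[head_n])
assert root_n in outroots and head_n in outheads
# `nodes` is sorted by revision number, so with a single included root and
# a single head the endpoints bracket the list.
assert nodes[0] == root_n and nodes[-1] == head_n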
1088 def headrevs(self, revs=None):
1088 def headrevs(self, revs=None):
1089 if revs is None:
1089 if revs is None:
1090 try:
1090 try:
1091 return self.index.headrevs()
1091 return self.index.headrevs()
1092 except AttributeError:
1092 except AttributeError:
1093 return self._headrevs()
1093 return self._headrevs()
1094 if rustdagop is not None:
1094 if rustdagop is not None:
1095 return rustdagop.headrevs(self.index, revs)
1095 return rustdagop.headrevs(self.index, revs)
1096 return dagop.headrevs(revs, self._uncheckedparentrevs)
1096 return dagop.headrevs(revs, self._uncheckedparentrevs)
1097
1097
1098 def computephases(self, roots):
1098 def computephases(self, roots):
1099 return self.index.computephasesmapsets(roots)
1099 return self.index.computephasesmapsets(roots)
1100
1100
1101 def _headrevs(self):
1101 def _headrevs(self):
1102 count = len(self)
1102 count = len(self)
1103 if not count:
1103 if not count:
1104 return [nullrev]
1104 return [nullrev]
1105 # we won't iterate over filtered revs, so nobody is a head at the start
1105 # we won't iterate over filtered revs, so nobody is a head at the start
1106 ishead = [0] * (count + 1)
1106 ishead = [0] * (count + 1)
1107 index = self.index
1107 index = self.index
1108 for r in self:
1108 for r in self:
1109 ishead[r] = 1 # I may be a head
1109 ishead[r] = 1 # I may be a head
1110 e = index[r]
1110 e = index[r]
1111 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
1111 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
1112 return [r for r, val in enumerate(ishead) if val]
1112 return [r for r, val in enumerate(ishead) if val]
1113
1113
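The marking trick in _headrevs() can be shown on a toy DAG; the snippet below is a standalone illustration (not revlog code) with -1 standing in for nullrev.

# Toy parent table {rev: (p1, p2)}; rev 3 merges 1 and 2.
parents = {0: (-1, -1), 1: (0, -1), 2: (0, -1), 3: (1, 2)}
ishead = [1] * len(parents)            # everybody starts as a candidate
for r, (p1, p2) in parents.items():
    for p in (p1, p2):
        if p != -1:
            ishead[p] = 0              # a revision with a child is no head
print([r for r, v in enumerate(ishead) if v])    # -> [3]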
1114 def heads(self, start=None, stop=None):
1114 def heads(self, start=None, stop=None):
1115 """return the list of all nodes that have no children
1115 """return the list of all nodes that have no children
1116
1116
1117 if start is specified, only heads that are descendants of
1117 if start is specified, only heads that are descendants of
1118 start will be returned
1118 start will be returned
1119 if stop is specified, it will consider all the revs from stop
1119 if stop is specified, it will consider all the revs from stop
1120 as if they had no children
1120 as if they had no children
1121 """
1121 """
1122 if start is None and stop is None:
1122 if start is None and stop is None:
1123 if not len(self):
1123 if not len(self):
1124 return [nullid]
1124 return [nullid]
1125 return [self.node(r) for r in self.headrevs()]
1125 return [self.node(r) for r in self.headrevs()]
1126
1126
1127 if start is None:
1127 if start is None:
1128 start = nullrev
1128 start = nullrev
1129 else:
1129 else:
1130 start = self.rev(start)
1130 start = self.rev(start)
1131
1131
1132 stoprevs = set(self.rev(n) for n in stop or [])
1132 stoprevs = set(self.rev(n) for n in stop or [])
1133
1133
1134 revs = dagop.headrevssubset(self.revs, self.parentrevs, startrev=start,
1134 revs = dagop.headrevssubset(self.revs, self.parentrevs, startrev=start,
1135 stoprevs=stoprevs)
1135 stoprevs=stoprevs)
1136
1136
1137 return [self.node(rev) for rev in revs]
1137 return [self.node(rev) for rev in revs]
1138
1138
1139 def children(self, node):
1139 def children(self, node):
1140 """find the children of a given node"""
1140 """find the children of a given node"""
1141 c = []
1141 c = []
1142 p = self.rev(node)
1142 p = self.rev(node)
1143 for r in self.revs(start=p + 1):
1143 for r in self.revs(start=p + 1):
1144 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1144 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1145 if prevs:
1145 if prevs:
1146 for pr in prevs:
1146 for pr in prevs:
1147 if pr == p:
1147 if pr == p:
1148 c.append(self.node(r))
1148 c.append(self.node(r))
1149 elif p == nullrev:
1149 elif p == nullrev:
1150 c.append(self.node(r))
1150 c.append(self.node(r))
1151 return c
1151 return c
1152
1152
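A small usage sketch for children(), assuming an opened revlog `rl` and a node `n` stored in it; it leans on the fact that a child is always appended after its parents.

# Hypothetical sketch; `rl` and `n` are assumptions.
for childnode in rl.children(n):
    # every child was appended later, so its revision number is larger
    assert rl.rev(childnode) > rl.rev(n)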
1153 def commonancestorsheads(self, a, b):
1153 def commonancestorsheads(self, a, b):
1154 """calculate all the heads of the common ancestors of nodes a and b"""
1154 """calculate all the heads of the common ancestors of nodes a and b"""
1155 a, b = self.rev(a), self.rev(b)
1155 a, b = self.rev(a), self.rev(b)
1156 ancs = self._commonancestorsheads(a, b)
1156 ancs = self._commonancestorsheads(a, b)
1157 return pycompat.maplist(self.node, ancs)
1157 return pycompat.maplist(self.node, ancs)
1158
1158
1159 def _commonancestorsheads(self, *revs):
1159 def _commonancestorsheads(self, *revs):
1160 """calculate all the heads of the common ancestors of revs"""
1160 """calculate all the heads of the common ancestors of revs"""
1161 try:
1161 try:
1162 ancs = self.index.commonancestorsheads(*revs)
1162 ancs = self.index.commonancestorsheads(*revs)
1163 except (AttributeError, OverflowError): # C implementation failed
1163 except (AttributeError, OverflowError): # C implementation failed
1164 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1164 ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
1165 return ancs
1165 return ancs
1166
1166
1167 def isancestor(self, a, b):
1167 def isancestor(self, a, b):
1168 """return True if node a is an ancestor of node b
1168 """return True if node a is an ancestor of node b
1169
1169
1170 A revision is considered an ancestor of itself."""
1170 A revision is considered an ancestor of itself."""
1171 a, b = self.rev(a), self.rev(b)
1171 a, b = self.rev(a), self.rev(b)
1172 return self.isancestorrev(a, b)
1172 return self.isancestorrev(a, b)
1173
1173
1174 def isancestorrev(self, a, b):
1174 def isancestorrev(self, a, b):
1175 """return True if revision a is an ancestor of revision b
1175 """return True if revision a is an ancestor of revision b
1176
1176
1177 A revision is considered an ancestor of itself.
1177 A revision is considered an ancestor of itself.
1178
1178
1179 The implementation of this is trivial but the use of
1179 The implementation of this is trivial but the use of
1180 reachableroots is not."""
1180 reachableroots is not."""
1181 if a == nullrev:
1181 if a == nullrev:
1182 return True
1182 return True
1183 elif a == b:
1183 elif a == b:
1184 return True
1184 return True
1185 elif a > b:
1185 elif a > b:
1186 return False
1186 return False
1187 return bool(self.reachableroots(a, [b], [a], includepath=False))
1187 return bool(self.reachableroots(a, [b], [a], includepath=False))
1188
1188
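The cheap pre-checks in isancestorrev() follow directly from revision numbers being topologically ordered; the standalone sketch below restates them (None meaning "undecided, ask reachableroots").

def cheap_isancestorrev(a, b, nullrev=-1):
    # illustration only, not the revlog implementation
    if a == nullrev or a == b:
        return True       # nullrev is everyone's ancestor; reflexive case
    if a > b:
        return False      # an ancestor never has a larger revision number
    return None           # undecided; the real code calls reachableroots()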
1189 def reachableroots(self, minroot, heads, roots, includepath=False):
1189 def reachableroots(self, minroot, heads, roots, includepath=False):
1190 """return (heads(::<roots> and <roots>::<heads>))
1190 """return (heads(::<roots> and <roots>::<heads>))
1191
1191
1192 If includepath is True, return (<roots>::<heads>)."""
1192 If includepath is True, return (<roots>::<heads>)."""
1193 try:
1193 try:
1194 return self.index.reachableroots2(minroot, heads, roots,
1194 return self.index.reachableroots2(minroot, heads, roots,
1195 includepath)
1195 includepath)
1196 except AttributeError:
1196 except AttributeError:
1197 return dagop._reachablerootspure(self.parentrevs,
1197 return dagop._reachablerootspure(self.parentrevs,
1198 minroot, roots, heads, includepath)
1198 minroot, roots, heads, includepath)
1199
1199
1200 def ancestor(self, a, b):
1200 def ancestor(self, a, b):
1201 """calculate the "best" common ancestor of nodes a and b"""
1201 """calculate the "best" common ancestor of nodes a and b"""
1202
1202
1203 a, b = self.rev(a), self.rev(b)
1203 a, b = self.rev(a), self.rev(b)
1204 try:
1204 try:
1205 ancs = self.index.ancestors(a, b)
1205 ancs = self.index.ancestors(a, b)
1206 except (AttributeError, OverflowError):
1206 except (AttributeError, OverflowError):
1207 ancs = ancestor.ancestors(self.parentrevs, a, b)
1207 ancs = ancestor.ancestors(self.parentrevs, a, b)
1208 if ancs:
1208 if ancs:
1209 # choose a consistent winner when there's a tie
1209 # choose a consistent winner when there's a tie
1210 return min(map(self.node, ancs))
1210 return min(map(self.node, ancs))
1211 return nullid
1211 return nullid
1212
1212
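The tie-break in ancestor() is just a deterministic choice; a standalone illustration with made-up node ids:

# Two equally good candidate ancestors (hypothetical 20-byte node ids):
candidates = [b'\x9f' * 20, b'\x0a' * 20]
# Taking the smallest binary node id makes every caller agree on the winner.
assert min(candidates) == b'\x0a' * 20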
1213 def _match(self, id):
1213 def _match(self, id):
1214 if isinstance(id, int):
1214 if isinstance(id, int):
1215 # rev
1215 # rev
1216 return self.node(id)
1216 return self.node(id)
1217 if len(id) == 20:
1217 if len(id) == 20:
1218 # possibly a binary node
1218 # possibly a binary node
1219 # odds of a binary node being all hex in ASCII are 1 in 10**25
1219 # odds of a binary node being all hex in ASCII are 1 in 10**25
1220 try:
1220 try:
1221 node = id
1221 node = id
1222 self.rev(node) # quick search the index
1222 self.rev(node) # quick search the index
1223 return node
1223 return node
1224 except error.LookupError:
1224 except error.LookupError:
1225 pass # may be partial hex id
1225 pass # may be partial hex id
1226 try:
1226 try:
1227 # str(rev)
1227 # str(rev)
1228 rev = int(id)
1228 rev = int(id)
1229 if "%d" % rev != id:
1229 if "%d" % rev != id:
1230 raise ValueError
1230 raise ValueError
1231 if rev < 0:
1231 if rev < 0:
1232 rev = len(self) + rev
1232 rev = len(self) + rev
1233 if rev < 0 or rev >= len(self):
1233 if rev < 0 or rev >= len(self):
1234 raise ValueError
1234 raise ValueError
1235 return self.node(rev)
1235 return self.node(rev)
1236 except (ValueError, OverflowError):
1236 except (ValueError, OverflowError):
1237 pass
1237 pass
1238 if len(id) == 40:
1238 if len(id) == 40:
1239 try:
1239 try:
1240 # a full hex nodeid?
1240 # a full hex nodeid?
1241 node = bin(id)
1241 node = bin(id)
1242 self.rev(node)
1242 self.rev(node)
1243 return node
1243 return node
1244 except (TypeError, error.LookupError):
1244 except (TypeError, error.LookupError):
1245 pass
1245 pass
1246
1246
1247 def _partialmatch(self, id):
1247 def _partialmatch(self, id):
1248 # we don't care about wdirfilenodeids as they should always be full hashes
1248 # we don't care about wdirfilenodeids as they should always be full hashes
1249 maybewdir = wdirhex.startswith(id)
1249 maybewdir = wdirhex.startswith(id)
1250 try:
1250 try:
1251 partial = self.index.partialmatch(id)
1251 partial = self.index.partialmatch(id)
1252 if partial and self.hasnode(partial):
1252 if partial and self.hasnode(partial):
1253 if maybewdir:
1253 if maybewdir:
1254 # single 'ff...' match in radix tree, ambiguous with wdir
1254 # single 'ff...' match in radix tree, ambiguous with wdir
1255 raise error.RevlogError
1255 raise error.RevlogError
1256 return partial
1256 return partial
1257 if maybewdir:
1257 if maybewdir:
1258 # no 'ff...' match in radix tree, wdir identified
1258 # no 'ff...' match in radix tree, wdir identified
1259 raise error.WdirUnsupported
1259 raise error.WdirUnsupported
1260 return None
1260 return None
1261 except error.RevlogError:
1261 except error.RevlogError:
1262 # parsers.c radix tree lookup gave multiple matches
1262 # parsers.c radix tree lookup gave multiple matches
1263 # fast path: for unfiltered changelog, radix tree is accurate
1263 # fast path: for unfiltered changelog, radix tree is accurate
1264 if not getattr(self, 'filteredrevs', None):
1264 if not getattr(self, 'filteredrevs', None):
1265 raise error.AmbiguousPrefixLookupError(
1265 raise error.AmbiguousPrefixLookupError(
1266 id, self.indexfile, _('ambiguous identifier'))
1266 id, self.indexfile, _('ambiguous identifier'))
1267 # fall through to slow path that filters hidden revisions
1267 # fall through to slow path that filters hidden revisions
1268 except (AttributeError, ValueError):
1268 except (AttributeError, ValueError):
1269 # we are pure python, or key was too short to search radix tree
1269 # we are pure python, or key was too short to search radix tree
1270 pass
1270 pass
1271
1271
1272 if id in self._pcache:
1272 if id in self._pcache:
1273 return self._pcache[id]
1273 return self._pcache[id]
1274
1274
1275 if len(id) <= 40:
1275 if len(id) <= 40:
1276 try:
1276 try:
1277 # hex(node)[:...]
1277 # hex(node)[:...]
1278 l = len(id) // 2 # grab an even number of digits
1278 l = len(id) // 2 # grab an even number of digits
1279 prefix = bin(id[:l * 2])
1279 prefix = bin(id[:l * 2])
1280 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1280 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
1281 nl = [n for n in nl if hex(n).startswith(id) and
1281 nl = [n for n in nl if hex(n).startswith(id) and
1282 self.hasnode(n)]
1282 self.hasnode(n)]
1283 if nullhex.startswith(id):
1283 if nullhex.startswith(id):
1284 nl.append(nullid)
1284 nl.append(nullid)
1285 if len(nl) > 0:
1285 if len(nl) > 0:
1286 if len(nl) == 1 and not maybewdir:
1286 if len(nl) == 1 and not maybewdir:
1287 self._pcache[id] = nl[0]
1287 self._pcache[id] = nl[0]
1288 return nl[0]
1288 return nl[0]
1289 raise error.AmbiguousPrefixLookupError(
1289 raise error.AmbiguousPrefixLookupError(
1290 id, self.indexfile, _('ambiguous identifier'))
1290 id, self.indexfile, _('ambiguous identifier'))
1291 if maybewdir:
1291 if maybewdir:
1292 raise error.WdirUnsupported
1292 raise error.WdirUnsupported
1293 return None
1293 return None
1294 except TypeError:
1294 except TypeError:
1295 pass
1295 pass
1296
1296
1297 def lookup(self, id):
1297 def lookup(self, id):
1298 """locate a node based on:
1298 """locate a node based on:
1299 - revision number or str(revision number)
1299 - revision number or str(revision number)
1300 - nodeid or subset of hex nodeid
1300 - nodeid or subset of hex nodeid
1301 """
1301 """
1302 n = self._match(id)
1302 n = self._match(id)
1303 if n is not None:
1303 if n is not None:
1304 return n
1304 return n
1305 n = self._partialmatch(id)
1305 n = self._partialmatch(id)
1306 if n:
1306 if n:
1307 return n
1307 return n
1308
1308
1309 raise error.LookupError(id, self.indexfile, _('no match found'))
1309 raise error.LookupError(id, self.indexfile, _('no match found'))
1310
1310
1311 def shortest(self, node, minlength=1):
1311 def shortest(self, node, minlength=1):
1312 """Find the shortest unambiguous prefix that matches node."""
1312 """Find the shortest unambiguous prefix that matches node."""
1313 def isvalid(prefix):
1313 def isvalid(prefix):
1314 try:
1314 try:
1315 matchednode = self._partialmatch(prefix)
1315 matchednode = self._partialmatch(prefix)
1316 except error.AmbiguousPrefixLookupError:
1316 except error.AmbiguousPrefixLookupError:
1317 return False
1317 return False
1318 except error.WdirUnsupported:
1318 except error.WdirUnsupported:
1319 # single 'ff...' match
1319 # single 'ff...' match
1320 return True
1320 return True
1321 if matchednode is None:
1321 if matchednode is None:
1322 raise error.LookupError(node, self.indexfile, _('no node'))
1322 raise error.LookupError(node, self.indexfile, _('no node'))
1323 return True
1323 return True
1324
1324
1325 def maybewdir(prefix):
1325 def maybewdir(prefix):
1326 return all(c == 'f' for c in pycompat.iterbytestr(prefix))
1326 return all(c == 'f' for c in pycompat.iterbytestr(prefix))
1327
1327
1328 hexnode = hex(node)
1328 hexnode = hex(node)
1329
1329
1330 def disambiguate(hexnode, minlength):
1330 def disambiguate(hexnode, minlength):
1331 """Disambiguate against wdirid."""
1331 """Disambiguate against wdirid."""
1332 for length in range(minlength, 41):
1332 for length in range(minlength, 41):
1333 prefix = hexnode[:length]
1333 prefix = hexnode[:length]
1334 if not maybewdir(prefix):
1334 if not maybewdir(prefix):
1335 return prefix
1335 return prefix
1336
1336
1337 if not getattr(self, 'filteredrevs', None):
1337 if not getattr(self, 'filteredrevs', None):
1338 try:
1338 try:
1339 length = max(self.index.shortest(node), minlength)
1339 length = max(self.index.shortest(node), minlength)
1340 return disambiguate(hexnode, length)
1340 return disambiguate(hexnode, length)
1341 except error.RevlogError:
1341 except error.RevlogError:
1342 if node != wdirid:
1342 if node != wdirid:
1343 raise error.LookupError(node, self.indexfile, _('no node'))
1343 raise error.LookupError(node, self.indexfile, _('no node'))
1344 except AttributeError:
1344 except AttributeError:
1345 # Fall through to pure code
1345 # Fall through to pure code
1346 pass
1346 pass
1347
1347
1348 if node == wdirid:
1348 if node == wdirid:
1349 for length in range(minlength, 41):
1349 for length in range(minlength, 41):
1350 prefix = hexnode[:length]
1350 prefix = hexnode[:length]
1351 if isvalid(prefix):
1351 if isvalid(prefix):
1352 return prefix
1352 return prefix
1353
1353
1354 for length in range(minlength, 41):
1354 for length in range(minlength, 41):
1355 prefix = hexnode[:length]
1355 prefix = hexnode[:length]
1356 if isvalid(prefix):
1356 if isvalid(prefix):
1357 return disambiguate(hexnode, length)
1357 return disambiguate(hexnode, length)
1358
1358
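The wdir disambiguation performed by shortest() can be shown in isolation; the snippet below uses a hypothetical, shortened hex node id.

hexnode = 'ff143fdd'                   # hypothetical, truncated hex node
def maybewdir(prefix):
    # a prefix of only 'f' characters could also name the all-'f' wdir id
    return all(c == 'f' for c in prefix)
for length in range(1, len(hexnode) + 1):
    if not maybewdir(hexnode[:length]):
        print(hexnode[:length])        # -> 'ff1', first unambiguous prefix
        break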
1359 def cmp(self, node, text):
1359 def cmp(self, node, text):
1360 """compare text with a given file revision
1360 """compare text with a given file revision
1361
1361
1362 returns True if text is different from what is stored.
1362 returns True if text is different from what is stored.
1363 """
1363 """
1364 p1, p2 = self.parents(node)
1364 p1, p2 = self.parents(node)
1365 return storageutil.hashrevisionsha1(text, p1, p2) != node
1365 return storageutil.hashrevisionsha1(text, p1, p2) != node
1366
1366
1367 def _cachesegment(self, offset, data):
1367 def _cachesegment(self, offset, data):
1368 """Add a segment to the revlog cache.
1368 """Add a segment to the revlog cache.
1369
1369
1370 Accepts an absolute offset and the data that is at that location.
1370 Accepts an absolute offset and the data that is at that location.
1371 """
1371 """
1372 o, d = self._chunkcache
1372 o, d = self._chunkcache
1373 # try to add to existing cache
1373 # try to add to existing cache
1374 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1374 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1375 self._chunkcache = o, d + data
1375 self._chunkcache = o, d + data
1376 else:
1376 else:
1377 self._chunkcache = offset, data
1377 self._chunkcache = offset, data
1378
1378
1379 def _readsegment(self, offset, length, df=None):
1379 def _readsegment(self, offset, length, df=None):
1380 """Load a segment of raw data from the revlog.
1380 """Load a segment of raw data from the revlog.
1381
1381
1382 Accepts an absolute offset, length to read, and an optional existing
1382 Accepts an absolute offset, length to read, and an optional existing
1383 file handle to read from.
1383 file handle to read from.
1384
1384
1385 If an existing file handle is passed, it will be seeked and the
1385 If an existing file handle is passed, it will be seeked and the
1386 original seek position will NOT be restored.
1386 original seek position will NOT be restored.
1387
1387
1388 Returns a str or buffer of raw byte data.
1388 Returns a str or buffer of raw byte data.
1389
1389
1390 Raises if the requested number of bytes could not be read.
1390 Raises if the requested number of bytes could not be read.
1391 """
1391 """
1392 # Cache data both forward and backward around the requested
1392 # Cache data both forward and backward around the requested
1393 # data, in a fixed size window. This helps speed up operations
1393 # data, in a fixed size window. This helps speed up operations
1394 # involving reading the revlog backwards.
1394 # involving reading the revlog backwards.
1395 cachesize = self._chunkcachesize
1395 cachesize = self._chunkcachesize
1396 realoffset = offset & ~(cachesize - 1)
1396 realoffset = offset & ~(cachesize - 1)
1397 reallength = (((offset + length + cachesize) & ~(cachesize - 1))
1397 reallength = (((offset + length + cachesize) & ~(cachesize - 1))
1398 - realoffset)
1398 - realoffset)
1399 with self._datareadfp(df) as df:
1399 with self._datareadfp(df) as df:
1400 df.seek(realoffset)
1400 df.seek(realoffset)
1401 d = df.read(reallength)
1401 d = df.read(reallength)
1402
1402
1403 self._cachesegment(realoffset, d)
1403 self._cachesegment(realoffset, d)
1404 if offset != realoffset or reallength != length:
1404 if offset != realoffset or reallength != length:
1405 startoffset = offset - realoffset
1405 startoffset = offset - realoffset
1406 if len(d) - startoffset < length:
1406 if len(d) - startoffset < length:
1407 raise error.RevlogError(
1407 raise error.RevlogError(
1408 _('partial read of revlog %s; expected %d bytes from '
1408 _('partial read of revlog %s; expected %d bytes from '
1409 'offset %d, got %d') %
1409 'offset %d, got %d') %
1410 (self.indexfile if self._inline else self.datafile,
1410 (self.indexfile if self._inline else self.datafile,
1411 length, realoffset, len(d) - startoffset))
1411 length, realoffset, len(d) - startoffset))
1412
1412
1413 return util.buffer(d, startoffset, length)
1413 return util.buffer(d, startoffset, length)
1414
1414
1415 if len(d) < length:
1415 if len(d) < length:
1416 raise error.RevlogError(
1416 raise error.RevlogError(
1417 _('partial read of revlog %s; expected %d bytes from offset '
1417 _('partial read of revlog %s; expected %d bytes from offset '
1418 '%d, got %d') %
1418 '%d, got %d') %
1419 (self.indexfile if self._inline else self.datafile,
1419 (self.indexfile if self._inline else self.datafile,
1420 length, offset, len(d)))
1420 length, offset, len(d)))
1421
1421
1422 return d
1422 return d
1423
1423
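The window rounding in _readsegment() is easiest to follow with numbers; a standalone worked example, assuming a 64 KiB chunk cache size (the usual default):

cachesize = 65536
offset, length = 70000, 100
realoffset = offset & ~(cachesize - 1)          # round down -> 65536
reallength = ((offset + length + cachesize) & ~(cachesize - 1)) - realoffset
# 70100 + 65536 = 135636 rounds down to 131072, so the 100-byte request is
# widened to one aligned 64 KiB window starting at 65536.
assert (realoffset, reallength) == (65536, 65536)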
1424 def _getsegment(self, offset, length, df=None):
1424 def _getsegment(self, offset, length, df=None):
1425 """Obtain a segment of raw data from the revlog.
1425 """Obtain a segment of raw data from the revlog.
1426
1426
1427 Accepts an absolute offset, length of bytes to obtain, and an
1427 Accepts an absolute offset, length of bytes to obtain, and an
1428 optional file handle to the already-opened revlog. If the file
1428 optional file handle to the already-opened revlog. If the file
1429 handle is used, its original seek position will not be preserved.
1429 handle is used, its original seek position will not be preserved.
1430
1430
1431 Requests for data may be returned from a cache.
1431 Requests for data may be returned from a cache.
1432
1432
1433 Returns a str or a buffer instance of raw byte data.
1433 Returns a str or a buffer instance of raw byte data.
1434 """
1434 """
1435 o, d = self._chunkcache
1435 o, d = self._chunkcache
1436 l = len(d)
1436 l = len(d)
1437
1437
1438 # is it in the cache?
1438 # is it in the cache?
1439 cachestart = offset - o
1439 cachestart = offset - o
1440 cacheend = cachestart + length
1440 cacheend = cachestart + length
1441 if cachestart >= 0 and cacheend <= l:
1441 if cachestart >= 0 and cacheend <= l:
1442 if cachestart == 0 and cacheend == l:
1442 if cachestart == 0 and cacheend == l:
1443 return d # avoid a copy
1443 return d # avoid a copy
1444 return util.buffer(d, cachestart, cacheend - cachestart)
1444 return util.buffer(d, cachestart, cacheend - cachestart)
1445
1445
1446 return self._readsegment(offset, length, df=df)
1446 return self._readsegment(offset, length, df=df)
1447
1447
1448 def _getsegmentforrevs(self, startrev, endrev, df=None):
1448 def _getsegmentforrevs(self, startrev, endrev, df=None):
1449 """Obtain a segment of raw data corresponding to a range of revisions.
1449 """Obtain a segment of raw data corresponding to a range of revisions.
1450
1450
1451 Accepts the start and end revisions and an optional already-open
1451 Accepts the start and end revisions and an optional already-open
1452 file handle to be used for reading. If the file handle is used, its
1452 file handle to be used for reading. If the file handle is used, its
1453 seek position will not be preserved.
1453 seek position will not be preserved.
1454
1454
1455 Requests for data may be satisfied by a cache.
1455 Requests for data may be satisfied by a cache.
1456
1456
1457 Returns a 2-tuple of (offset, data) for the requested range of
1457 Returns a 2-tuple of (offset, data) for the requested range of
1458 revisions. Offset is the integer offset from the beginning of the
1458 revisions. Offset is the integer offset from the beginning of the
1459 revlog and data is a str or buffer of the raw byte data.
1459 revlog and data is a str or buffer of the raw byte data.
1460
1460
1461 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1461 Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
1462 to determine where each revision's data begins and ends.
1462 to determine where each revision's data begins and ends.
1463 """
1463 """
1464 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1464 # Inlined self.start(startrev) & self.end(endrev) for perf reasons
1465 # (functions are expensive).
1465 # (functions are expensive).
1466 index = self.index
1466 index = self.index
1467 istart = index[startrev]
1467 istart = index[startrev]
1468 start = int(istart[0] >> 16)
1468 start = int(istart[0] >> 16)
1469 if startrev == endrev:
1469 if startrev == endrev:
1470 end = start + istart[1]
1470 end = start + istart[1]
1471 else:
1471 else:
1472 iend = index[endrev]
1472 iend = index[endrev]
1473 end = int(iend[0] >> 16) + iend[1]
1473 end = int(iend[0] >> 16) + iend[1]
1474
1474
1475 if self._inline:
1475 if self._inline:
1476 start += (startrev + 1) * self._io.size
1476 start += (startrev + 1) * self._io.size
1477 end += (endrev + 1) * self._io.size
1477 end += (endrev + 1) * self._io.size
1478 length = end - start
1478 length = end - start
1479
1479
1480 return start, self._getsegment(start, length, df=df)
1480 return start, self._getsegment(start, length, df=df)
1481
1481
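The inline adjustment above shifts the physical offset because, in an inline revlog, index entries are interleaved with the data; a standalone worked example, assuming the 64-byte version-1 index entry size:

iosize = 64                       # assumed size of one v1 index entry
startrev, logical_start = 3, 500  # hypothetical values
physical_start = logical_start + (startrev + 1) * iosize
assert physical_start == 756      # data for rev 3 starts 256 bytes later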
1482 def _chunk(self, rev, df=None):
1482 def _chunk(self, rev, df=None):
1483 """Obtain a single decompressed chunk for a revision.
1483 """Obtain a single decompressed chunk for a revision.
1484
1484
1485 Accepts an integer revision and an optional already-open file handle
1485 Accepts an integer revision and an optional already-open file handle
1486 to be used for reading. If used, the seek position of the file will not
1486 to be used for reading. If used, the seek position of the file will not
1487 be preserved.
1487 be preserved.
1488
1488
1489 Returns a str holding uncompressed data for the requested revision.
1489 Returns a str holding uncompressed data for the requested revision.
1490 """
1490 """
1491 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1491 return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1492
1492
1493 def _chunks(self, revs, df=None, targetsize=None):
1493 def _chunks(self, revs, df=None, targetsize=None):
1494 """Obtain decompressed chunks for the specified revisions.
1494 """Obtain decompressed chunks for the specified revisions.
1495
1495
1496 Accepts an iterable of numeric revisions that are assumed to be in
1496 Accepts an iterable of numeric revisions that are assumed to be in
1497 ascending order. Also accepts an optional already-open file handle
1497 ascending order. Also accepts an optional already-open file handle
1498 to be used for reading. If used, the seek position of the file will
1498 to be used for reading. If used, the seek position of the file will
1499 not be preserved.
1499 not be preserved.
1500
1500
1501 This function is similar to calling ``self._chunk()`` multiple times,
1501 This function is similar to calling ``self._chunk()`` multiple times,
1502 but is faster.
1502 but is faster.
1503
1503
1504 Returns a list with decompressed data for each requested revision.
1504 Returns a list with decompressed data for each requested revision.
1505 """
1505 """
1506 if not revs:
1506 if not revs:
1507 return []
1507 return []
1508 start = self.start
1508 start = self.start
1509 length = self.length
1509 length = self.length
1510 inline = self._inline
1510 inline = self._inline
1511 iosize = self._io.size
1511 iosize = self._io.size
1512 buffer = util.buffer
1512 buffer = util.buffer
1513
1513
1514 l = []
1514 l = []
1515 ladd = l.append
1515 ladd = l.append
1516
1516
1517 if not self._withsparseread:
1517 if not self._withsparseread:
1518 slicedchunks = (revs,)
1518 slicedchunks = (revs,)
1519 else:
1519 else:
1520 slicedchunks = deltautil.slicechunk(self, revs,
1520 slicedchunks = deltautil.slicechunk(self, revs,
1521 targetsize=targetsize)
1521 targetsize=targetsize)
1522
1522
1523 for revschunk in slicedchunks:
1523 for revschunk in slicedchunks:
1524 firstrev = revschunk[0]
1524 firstrev = revschunk[0]
1525 # Skip trailing revisions with empty diff
1525 # Skip trailing revisions with empty diff
1526 for lastrev in revschunk[::-1]:
1526 for lastrev in revschunk[::-1]:
1527 if length(lastrev) != 0:
1527 if length(lastrev) != 0:
1528 break
1528 break
1529
1529
1530 try:
1530 try:
1531 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1531 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1532 except OverflowError:
1532 except OverflowError:
1533 # issue4215 - we can't cache a run of chunks greater than
1533 # issue4215 - we can't cache a run of chunks greater than
1534 # 2G on Windows
1534 # 2G on Windows
1535 return [self._chunk(rev, df=df) for rev in revschunk]
1535 return [self._chunk(rev, df=df) for rev in revschunk]
1536
1536
1537 decomp = self.decompress
1537 decomp = self.decompress
1538 for rev in revschunk:
1538 for rev in revschunk:
1539 chunkstart = start(rev)
1539 chunkstart = start(rev)
1540 if inline:
1540 if inline:
1541 chunkstart += (rev + 1) * iosize
1541 chunkstart += (rev + 1) * iosize
1542 chunklength = length(rev)
1542 chunklength = length(rev)
1543 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1543 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1544
1544
1545 return l
1545 return l
1546
1546
1547 def _chunkclear(self):
1547 def _chunkclear(self):
1548 """Clear the raw chunk cache."""
1548 """Clear the raw chunk cache."""
1549 self._chunkcache = (0, '')
1549 self._chunkcache = (0, '')
1550
1550
1551 def deltaparent(self, rev):
1551 def deltaparent(self, rev):
1552 """return deltaparent of the given revision"""
1552 """return deltaparent of the given revision"""
1553 base = self.index[rev][3]
1553 base = self.index[rev][3]
1554 if base == rev:
1554 if base == rev:
1555 return nullrev
1555 return nullrev
1556 elif self._generaldelta:
1556 elif self._generaldelta:
1557 return base
1557 return base
1558 else:
1558 else:
1559 return rev - 1
1559 return rev - 1
1560
1560
1561 def issnapshot(self, rev):
1561 def issnapshot(self, rev):
1562 """tells whether rev is a snapshot
1562 """tells whether rev is a snapshot
1563 """
1563 """
1564 if not self._sparserevlog:
1564 if not self._sparserevlog:
1565 return self.deltaparent(rev) == nullrev
1565 return self.deltaparent(rev) == nullrev
1566 elif util.safehasattr(self.index, 'issnapshot'):
1566 elif util.safehasattr(self.index, 'issnapshot'):
1567 # directly assign the method to cache the attribute test and access
1567 # directly assign the method to cache the attribute test and access
1568 self.issnapshot = self.index.issnapshot
1568 self.issnapshot = self.index.issnapshot
1569 return self.issnapshot(rev)
1569 return self.issnapshot(rev)
1570 if rev == nullrev:
1570 if rev == nullrev:
1571 return True
1571 return True
1572 entry = self.index[rev]
1572 entry = self.index[rev]
1573 base = entry[3]
1573 base = entry[3]
1574 if base == rev:
1574 if base == rev:
1575 return True
1575 return True
1576 if base == nullrev:
1576 if base == nullrev:
1577 return True
1577 return True
1578 p1 = entry[5]
1578 p1 = entry[5]
1579 p2 = entry[6]
1579 p2 = entry[6]
1580 if base == p1 or base == p2:
1580 if base == p1 or base == p2:
1581 return False
1581 return False
1582 return self.issnapshot(base)
1582 return self.issnapshot(base)
1583
1583
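The sparse-revlog rule coded above reduces to "a revision is a snapshot when its delta base is not one of its parents, checked recursively through the bases"; a standalone toy version with -1 standing in for nullrev:

# Toy table: rev -> (deltabase, p1, p2).
entries = {0: (-1, -1, -1),   # full snapshot (delta against nullrev)
           1: (0, 0, -1),     # delta against its parent -> not a snapshot
           2: (0, 1, -1)}     # delta against non-parent rev 0 -> snapshot

def toy_issnapshot(rev):
    if rev == -1:
        return True
    base, p1, p2 = entries[rev]
    if base == rev or base == -1:
        return True
    if base in (p1, p2):
        return False
    return toy_issnapshot(base)

assert [toy_issnapshot(r) for r in (0, 1, 2)] == [True, False, True]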
1584 def snapshotdepth(self, rev):
1584 def snapshotdepth(self, rev):
1585 """number of snapshot in the chain before this one"""
1585 """number of snapshot in the chain before this one"""
1586 if not self.issnapshot(rev):
1586 if not self.issnapshot(rev):
1587 raise error.ProgrammingError('revision %d not a snapshot' % rev)
1587 raise error.ProgrammingError('revision %d not a snapshot' % rev)
1588 return len(self._deltachain(rev)[0]) - 1
1588 return len(self._deltachain(rev)[0]) - 1
1589
1589
1590 def revdiff(self, rev1, rev2):
1590 def revdiff(self, rev1, rev2):
1591 """return or calculate a delta between two revisions
1591 """return or calculate a delta between two revisions
1592
1592
1593 The delta calculated is in binary form and is intended to be written to
1593 The delta calculated is in binary form and is intended to be written to
1594 revlog data directly. So this function needs raw revision data.
1594 revlog data directly. So this function needs raw revision data.
1595 """
1595 """
1596 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1596 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1597 return bytes(self._chunk(rev2))
1597 return bytes(self._chunk(rev2))
1598
1598
1599 return mdiff.textdiff(self.rawdata(rev1),
1599 return mdiff.textdiff(self.rawdata(rev1),
1600 self.rawdata(rev2))
1600 self.rawdata(rev2))
1601
1601
1602 def revision(self, nodeorrev, _df=None, raw=False):
1602 def revision(self, nodeorrev, _df=None, raw=False):
1603 """return an uncompressed revision of a given node or revision
1603 """return an uncompressed revision of a given node or revision
1604 number.
1604 number.
1605
1605
1606 _df - an existing file handle to read from. (internal-only)
1606 _df - an existing file handle to read from. (internal-only)
1607 raw - an optional argument specifying if the revision data is to be
1607 raw - an optional argument specifying if the revision data is to be
1608 treated as raw data when applying flag transforms. 'raw' should be set
1608 treated as raw data when applying flag transforms. 'raw' should be set
1609 to True when generating changegroups or in debug commands.
1609 to True when generating changegroups or in debug commands.
1610 """
1610 """
1611 return self._revisiondata(nodeorrev, _df, raw=raw)
1611 return self._revisiondata(nodeorrev, _df, raw=raw)
1612
1612
1613 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1613 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1614 if isinstance(nodeorrev, int):
1614 if isinstance(nodeorrev, int):
1615 rev = nodeorrev
1615 rev = nodeorrev
1616 node = self.node(rev)
1616 node = self.node(rev)
1617 else:
1617 else:
1618 node = nodeorrev
1618 node = nodeorrev
1619 rev = None
1619 rev = None
1620
1620
1621 cachedrev = None
1621 cachedrev = None
1622 flags = None
1622 flags = None
1623 rawtext = None
1623 rawtext = None
1624 basetext = None
1624 if node == nullid:
1625 if node == nullid:
1625 return ""
1626 return ""
1626 if self._revisioncache:
1627 if self._revisioncache:
1627 if self._revisioncache[0] == node:
1628 if self._revisioncache[0] == node:
1628 # _cache only stores rawtext
1629 # _cache only stores rawtext
1629 # rawtext is reusable. but we might need to run flag processors
1630 # rawtext is reusable. but we might need to run flag processors
1630 rawtext = self._revisioncache[2]
1631 rawtext = self._revisioncache[2]
1631 if raw:
1632 if raw:
1632 return rawtext
1633 return rawtext
1633 # duplicated, but good for perf
1634 # duplicated, but good for perf
1634 if rev is None:
1635 if rev is None:
1635 rev = self.rev(node)
1636 rev = self.rev(node)
1636 if flags is None:
1637 if flags is None:
1637 flags = self.flags(rev)
1638 flags = self.flags(rev)
1638 # no extra flags set, no flag processor runs, text = rawtext
1639 # no extra flags set, no flag processor runs, text = rawtext
1639 if flags == REVIDX_DEFAULT_FLAGS:
1640 if flags == REVIDX_DEFAULT_FLAGS:
1640 return rawtext
1641 return rawtext
1641
1642
1642 cachedrev = self._revisioncache[1]
1643 cachedrev = self._revisioncache[1]
1643
1644
1644 # look up what we need to read
1645 # look up what we need to read
1645 if rawtext is None:
1646 if rawtext is None:
1646 if rev is None:
1647 if rev is None:
1647 rev = self.rev(node)
1648 rev = self.rev(node)
1648
1649
1649 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1650 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1650 if stopped:
1651 if stopped:
1651 rawtext = self._revisioncache[2]
1652 basetext = self._revisioncache[2]
1652
1653
1653 # drop cache to save memory
1654 # drop cache to save memory
1654 self._revisioncache = None
1655 self._revisioncache = None
1655
1656
1656 targetsize = None
1657 targetsize = None
1657 rawsize = self.index[rev][2]
1658 rawsize = self.index[rev][2]
1658 if 0 <= rawsize:
1659 if 0 <= rawsize:
1659 targetsize = 4 * rawsize
1660 targetsize = 4 * rawsize
1660
1661
1661 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1662 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1662 if rawtext is None:
1663 if basetext is None:
1663 rawtext = bytes(bins[0])
1664 basetext = bytes(bins[0])
1664 bins = bins[1:]
1665 bins = bins[1:]
1665
1666
1666 rawtext = mdiff.patches(rawtext, bins)
1667 rawtext = mdiff.patches(basetext, bins)
1668 del basetext # let us have a chance to free memory early
1667 self._revisioncache = (node, rev, rawtext)
1669 self._revisioncache = (node, rev, rawtext)
1668
1670
1669 if flags is None:
1671 if flags is None:
1670 if rev is None:
1672 if rev is None:
1671 rev = self.rev(node)
1673 rev = self.rev(node)
1672 flags = self.flags(rev)
1674 flags = self.flags(rev)
1673
1675
1674 text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
1676 text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
1675 if validatehash:
1677 if validatehash:
1676 self.checkhash(text, node, rev=rev)
1678 self.checkhash(text, node, rev=rev)
1677
1679
1678 return text
1680 return text
1679
1681
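A hedged sketch contrasting the two public entry points that funnel into _revisiondata(), assuming an opened revlog `rl` and a node `n` stored without any special flags:

# Hypothetical sketch; `rl` and `n` are assumptions.
raw = rl.rawdata(n)      # delta chain applied, flag processors skipped
text = rl.revision(n)    # same, plus 'read' flag transforms and hash check
# With no flags set the two agree; flag-processor extensions (LFS-style
# pointer storage, for instance) are where they would differ.
assert raw == text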
1680 def rawdata(self, nodeorrev, _df=None):
1682 def rawdata(self, nodeorrev, _df=None):
1681 """return an uncompressed raw data of a given node or revision number.
1683 """return an uncompressed raw data of a given node or revision number.
1682
1684
1683 _df - an existing file handle to read from. (internal-only)
1685 _df - an existing file handle to read from. (internal-only)
1684 """
1686 """
1685 return self._revisiondata(nodeorrev, _df, raw=True)
1687 return self._revisiondata(nodeorrev, _df, raw=True)
1686
1688
1687 def hash(self, text, p1, p2):
1689 def hash(self, text, p1, p2):
1688 """Compute a node hash.
1690 """Compute a node hash.
1689
1691
1690 Available as a function so that subclasses can replace the hash
1692 Available as a function so that subclasses can replace the hash
1691 as needed.
1693 as needed.
1692 """
1694 """
1693 return storageutil.hashrevisionsha1(text, p1, p2)
1695 return storageutil.hashrevisionsha1(text, p1, p2)
1694
1696
1695 def _processflags(self, text, flags, operation, raw=False):
1697 def _processflags(self, text, flags, operation, raw=False):
1696 """Inspect revision data flags and applies transforms defined by
1698 """Inspect revision data flags and applies transforms defined by
1697 registered flag processors.
1699 registered flag processors.
1698
1700
1699 ``text`` - the revision data to process
1701 ``text`` - the revision data to process
1700 ``flags`` - the revision flags
1702 ``flags`` - the revision flags
1701 ``operation`` - the operation being performed (read or write)
1703 ``operation`` - the operation being performed (read or write)
1702 ``raw`` - an optional argument describing if the raw transform should be
1704 ``raw`` - an optional argument describing if the raw transform should be
1703 applied.
1705 applied.
1704
1706
1705 This method processes the flags in the order (or reverse order if
1707 This method processes the flags in the order (or reverse order if
1706 ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
1708 ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
1707 flag processors registered for present flags. The order of flags defined
1709 flag processors registered for present flags. The order of flags defined
1708 in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.
1710 in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.
1709
1711
1710 Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
1712 Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
1711 processed text and ``validatehash`` is a bool indicating whether the
1713 processed text and ``validatehash`` is a bool indicating whether the
1712 returned text should be checked for hash integrity.
1714 returned text should be checked for hash integrity.
1713
1715
1714 Note: If the ``raw`` argument is set, it has precedence over the
1716 Note: If the ``raw`` argument is set, it has precedence over the
1715 operation and will only update the value of ``validatehash``.
1717 operation and will only update the value of ``validatehash``.
1716 """
1718 """
1717 # fast path: no flag processors will run
1719 # fast path: no flag processors will run
1718 if flags == 0:
1720 if flags == 0:
1719 return text, True
1721 return text, True
1720 if operation not in ('read', 'write'):
1722 if operation not in ('read', 'write'):
1721 raise error.ProgrammingError(_("invalid '%s' operation") %
1723 raise error.ProgrammingError(_("invalid '%s' operation") %
1722 operation)
1724 operation)
1723 # Check all flags are known.
1725 # Check all flags are known.
1724 if flags & ~flagutil.REVIDX_KNOWN_FLAGS:
1726 if flags & ~flagutil.REVIDX_KNOWN_FLAGS:
1725 raise error.RevlogError(_("incompatible revision flag '%#x'") %
1727 raise error.RevlogError(_("incompatible revision flag '%#x'") %
1726 (flags & ~flagutil.REVIDX_KNOWN_FLAGS))
1728 (flags & ~flagutil.REVIDX_KNOWN_FLAGS))
1727 validatehash = True
1729 validatehash = True
1728 # Depending on the operation (read or write), the order might be
1730 # Depending on the operation (read or write), the order might be
1729 # reversed due to non-commutative transforms.
1731 # reversed due to non-commutative transforms.
1730 orderedflags = REVIDX_FLAGS_ORDER
1732 orderedflags = REVIDX_FLAGS_ORDER
1731 if operation == 'write':
1733 if operation == 'write':
1732 orderedflags = reversed(orderedflags)
1734 orderedflags = reversed(orderedflags)
1733
1735
1734 for flag in orderedflags:
1736 for flag in orderedflags:
1735 # If a flagprocessor has been registered for a known flag, apply the
1737 # If a flagprocessor has been registered for a known flag, apply the
1736 # related operation transform and update result tuple.
1738 # related operation transform and update result tuple.
1737 if flag & flags:
1739 if flag & flags:
1738 vhash = True
1740 vhash = True
1739
1741
1740 if flag not in self._flagprocessors:
1742 if flag not in self._flagprocessors:
1741 message = _("missing processor for flag '%#x'") % (flag)
1743 message = _("missing processor for flag '%#x'") % (flag)
1742 raise error.RevlogError(message)
1744 raise error.RevlogError(message)
1743
1745
1744 processor = self._flagprocessors[flag]
1746 processor = self._flagprocessors[flag]
1745 if processor is not None:
1747 if processor is not None:
1746 readtransform, writetransform, rawtransform = processor
1748 readtransform, writetransform, rawtransform = processor
1747
1749
1748 if raw:
1750 if raw:
1749 vhash = rawtransform(self, text)
1751 vhash = rawtransform(self, text)
1750 elif operation == 'read':
1752 elif operation == 'read':
1751 text, vhash = readtransform(self, text)
1753 text, vhash = readtransform(self, text)
1752 else: # write operation
1754 else: # write operation
1753 text, vhash = writetransform(self, text)
1755 text, vhash = writetransform(self, text)
1754 validatehash = validatehash and vhash
1756 validatehash = validatehash and vhash
1755
1757
1756 return text, validatehash
1758 return text, validatehash
1757
1759
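Why the write path reverses REVIDX_FLAGS_ORDER is easiest to see with a toy non-commutative pipeline; the snippet below is an illustration only, not the flag-processor API:

read_transforms = [('A', lambda t: t + b'-a'), ('B', lambda t: t + b'-b')]
text = b'base'
for _name, transform in read_transforms:        # read order: A, then B
    text = transform(text)
assert text == b'base-a-b'
for _name, _transform in reversed(read_transforms):   # write: B, then A
    text = text[:-2]                            # toy inverse of each step
assert text == b'base'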
1758 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1760 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1759 """Check node hash integrity.
1761 """Check node hash integrity.
1760
1762
1761 Available as a function so that subclasses can extend hash mismatch
1763 Available as a function so that subclasses can extend hash mismatch
1762 behaviors as needed.
1764 behaviors as needed.
1763 """
1765 """
1764 try:
1766 try:
1765 if p1 is None and p2 is None:
1767 if p1 is None and p2 is None:
1766 p1, p2 = self.parents(node)
1768 p1, p2 = self.parents(node)
1767 if node != self.hash(text, p1, p2):
1769 if node != self.hash(text, p1, p2):
1768 # Clear the revision cache on hash failure. The revision cache
1770 # Clear the revision cache on hash failure. The revision cache
1769 # only stores the raw revision and clearing the cache does have
1771 # only stores the raw revision and clearing the cache does have
1770 # the side-effect that we won't have a cache hit when the raw
1772 # the side-effect that we won't have a cache hit when the raw
1771 # revision data is accessed. But this case should be rare and
1773 # revision data is accessed. But this case should be rare and
1772 # it is extra work to teach the cache about the hash
1774 # it is extra work to teach the cache about the hash
1773 # verification state.
1775 # verification state.
1774 if self._revisioncache and self._revisioncache[0] == node:
1776 if self._revisioncache and self._revisioncache[0] == node:
1775 self._revisioncache = None
1777 self._revisioncache = None
1776
1778
1777 revornode = rev
1779 revornode = rev
1778 if revornode is None:
1780 if revornode is None:
1779 revornode = templatefilters.short(hex(node))
1781 revornode = templatefilters.short(hex(node))
1780 raise error.RevlogError(_("integrity check failed on %s:%s")
1782 raise error.RevlogError(_("integrity check failed on %s:%s")
1781 % (self.indexfile, pycompat.bytestr(revornode)))
1783 % (self.indexfile, pycompat.bytestr(revornode)))
1782 except error.RevlogError:
1784 except error.RevlogError:
1783 if self._censorable and storageutil.iscensoredtext(text):
1785 if self._censorable and storageutil.iscensoredtext(text):
1784 raise error.CensoredNodeError(self.indexfile, node, text)
1786 raise error.CensoredNodeError(self.indexfile, node, text)
1785 raise
1787 raise
1786
1788
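# For reference, a sketch of the default node hash that checkhash()
# compares against, assuming the usual sha1-over-sorted-parents scheme
# (subclasses may override self.hash() with something else).
import hashlib

def _sketchhash(text, p1, p2):
    a, b = sorted([p1, p2])
    s = hashlib.sha1(a)
    s.update(b)
    s.update(text)
    return s.digest()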
1787 def _enforceinlinesize(self, tr, fp=None):
1789 def _enforceinlinesize(self, tr, fp=None):
1788 """Check if the revlog is too big for inline and convert if so.
1790 """Check if the revlog is too big for inline and convert if so.
1789
1791
1790 This should be called after revisions are added to the revlog. If the
1792 This should be called after revisions are added to the revlog. If the
1791 revlog has grown too large to be an inline revlog, it will convert it
1793 revlog has grown too large to be an inline revlog, it will convert it
1792 to use multiple index and data files.
1794 to use multiple index and data files.
1793 """
1795 """
1794 tiprev = len(self) - 1
1796 tiprev = len(self) - 1
1795 if (not self._inline or
1797 if (not self._inline or
1796 (self.start(tiprev) + self.length(tiprev)) < _maxinline):
1798 (self.start(tiprev) + self.length(tiprev)) < _maxinline):
1797 return
1799 return
1798
1800
1799 trinfo = tr.find(self.indexfile)
1801 trinfo = tr.find(self.indexfile)
1800 if trinfo is None:
1802 if trinfo is None:
1801 raise error.RevlogError(_("%s not found in the transaction")
1803 raise error.RevlogError(_("%s not found in the transaction")
1802 % self.indexfile)
1804 % self.indexfile)
1803
1805
1804 trindex = trinfo[2]
1806 trindex = trinfo[2]
1805 if trindex is not None:
1807 if trindex is not None:
1806 dataoff = self.start(trindex)
1808 dataoff = self.start(trindex)
1807 else:
1809 else:
1808 # revlog was stripped at start of transaction, use all leftover data
1810 # revlog was stripped at start of transaction, use all leftover data
1809 trindex = len(self) - 1
1811 trindex = len(self) - 1
1810 dataoff = self.end(tiprev)
1812 dataoff = self.end(tiprev)
1811
1813
1812 tr.add(self.datafile, dataoff)
1814 tr.add(self.datafile, dataoff)
1813
1815
1814 if fp:
1816 if fp:
1815 fp.flush()
1817 fp.flush()
1816 fp.close()
1818 fp.close()
1817 # We can't use the cached file handle after close(). So prevent
1819 # We can't use the cached file handle after close(). So prevent
1818 # its usage.
1820 # its usage.
1819 self._writinghandles = None
1821 self._writinghandles = None
1820
1822
1821 with self._indexfp('r') as ifh, self._datafp('w') as dfh:
1823 with self._indexfp('r') as ifh, self._datafp('w') as dfh:
1822 for r in self:
1824 for r in self:
1823 dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])
1825 dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])
1824
1826
1825 with self._indexfp('w') as fp:
1827 with self._indexfp('w') as fp:
1826 self.version &= ~FLAG_INLINE_DATA
1828 self.version &= ~FLAG_INLINE_DATA
1827 self._inline = False
1829 self._inline = False
1828 io = self._io
1830 io = self._io
1829 for i in self:
1831 for i in self:
1830 e = io.packentry(self.index[i], self.node, self.version, i)
1832 e = io.packentry(self.index[i], self.node, self.version, i)
1831 fp.write(e)
1833 fp.write(e)
1832
1834
1833 # the temp file replaces the real index when we exit the context
1835 # the temp file replaces the real index when we exit the context
1834 # manager
1836 # manager
1835
1837
1836 tr.replace(self.indexfile, trindex * self._io.size)
1838 tr.replace(self.indexfile, trindex * self._io.size)
1837 self._chunkclear()
1839 self._chunkclear()
1838
1840
1839 def _nodeduplicatecallback(self, transaction, node):
1841 def _nodeduplicatecallback(self, transaction, node):
1840 """called when trying to add a node already stored.
1842 """called when trying to add a node already stored.
1841 """
1843 """
1842
1844
1843 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
1845 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
1844 node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None):
1846 node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None):
1845 """add a revision to the log
1847 """add a revision to the log
1846
1848
1847 text - the revision data to add
1849 text - the revision data to add
1848 transaction - the transaction object used for rollback
1850 transaction - the transaction object used for rollback
1849 link - the linkrev data to add
1851 link - the linkrev data to add
1850 p1, p2 - the parent nodeids of the revision
1852 p1, p2 - the parent nodeids of the revision
1851 cachedelta - an optional precomputed delta
1853 cachedelta - an optional precomputed delta
1852 node - nodeid of revision; typically node is not specified, and it is
1854 node - nodeid of revision; typically node is not specified, and it is
1853 computed by default as hash(text, p1, p2); however, subclasses might
1855 computed by default as hash(text, p1, p2); however, subclasses might
1854 use a different hashing method (and override checkhash() in such a case)
1856 use a different hashing method (and override checkhash() in such a case)
1855 flags - the known flags to set on the revision
1857 flags - the known flags to set on the revision
1856 deltacomputer - an optional deltacomputer instance shared between
1858 deltacomputer - an optional deltacomputer instance shared between
1857 multiple calls
1859 multiple calls
1858 """
1860 """
1859 if link == nullrev:
1861 if link == nullrev:
1860 raise error.RevlogError(_("attempted to add linkrev -1 to %s")
1862 raise error.RevlogError(_("attempted to add linkrev -1 to %s")
1861 % self.indexfile)
1863 % self.indexfile)
1862
1864
1863 if flags:
1865 if flags:
1864 node = node or self.hash(text, p1, p2)
1866 node = node or self.hash(text, p1, p2)
1865
1867
1866 rawtext, validatehash = self._processflags(text, flags, 'write')
1868 rawtext, validatehash = self._processflags(text, flags, 'write')
1867
1869
1868 # If the flag processor modifies the revision data, ignore any provided
1870 # If the flag processor modifies the revision data, ignore any provided
1869 # cachedelta.
1871 # cachedelta.
1870 if rawtext != text:
1872 if rawtext != text:
1871 cachedelta = None
1873 cachedelta = None
1872
1874
1873 if len(rawtext) > _maxentrysize:
1875 if len(rawtext) > _maxentrysize:
1874 raise error.RevlogError(
1876 raise error.RevlogError(
1875 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
1877 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
1876 % (self.indexfile, len(rawtext)))
1878 % (self.indexfile, len(rawtext)))
1877
1879
1878 node = node or self.hash(rawtext, p1, p2)
1880 node = node or self.hash(rawtext, p1, p2)
1879 if node in self.nodemap:
1881 if node in self.nodemap:
1880 return node
1882 return node
1881
1883
1882 if validatehash:
1884 if validatehash:
1883 self.checkhash(rawtext, node, p1=p1, p2=p2)
1885 self.checkhash(rawtext, node, p1=p1, p2=p2)
1884
1886
1885 return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
1887 return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
1886 flags, cachedelta=cachedelta,
1888 flags, cachedelta=cachedelta,
1887 deltacomputer=deltacomputer)
1889 deltacomputer=deltacomputer)
1888
1890
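# Hypothetical call site for addrevision(); `rl` stands for a revlog and
# `tr` for an open transaction, and the linkrev and null parents below
# are placeholders.
from mercurial.node import nullid

newnode = rl.addrevision(b'file content\n', tr, 0, nullid, nullid)
# the returned node is hash(text, p1, p2) unless a node was passed in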
1889 def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
1891 def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
1890 cachedelta=None, deltacomputer=None):
1892 cachedelta=None, deltacomputer=None):
1891 """add a raw revision with known flags, node and parents
1893 """add a raw revision with known flags, node and parents
1892 useful when reusing a revision not stored in this revlog (ex: received
1894 useful when reusing a revision not stored in this revlog (ex: received
1893 over the wire, or read from an external bundle).
1895 over the wire, or read from an external bundle).
1894 """
1896 """
1895 dfh = None
1897 dfh = None
1896 if not self._inline:
1898 if not self._inline:
1897 dfh = self._datafp("a+")
1899 dfh = self._datafp("a+")
1898 ifh = self._indexfp("a+")
1900 ifh = self._indexfp("a+")
1899 try:
1901 try:
1900 return self._addrevision(node, rawtext, transaction, link, p1, p2,
1902 return self._addrevision(node, rawtext, transaction, link, p1, p2,
1901 flags, cachedelta, ifh, dfh,
1903 flags, cachedelta, ifh, dfh,
1902 deltacomputer=deltacomputer)
1904 deltacomputer=deltacomputer)
1903 finally:
1905 finally:
1904 if dfh:
1906 if dfh:
1905 dfh.close()
1907 dfh.close()
1906 ifh.close()
1908 ifh.close()
1907
1909
1908 def compress(self, data):
1910 def compress(self, data):
1909 """Generate a possibly-compressed representation of data."""
1911 """Generate a possibly-compressed representation of data."""
1910 if not data:
1912 if not data:
1911 return '', data
1913 return '', data
1912
1914
1913 compressed = self._compressor.compress(data)
1915 compressed = self._compressor.compress(data)
1914
1916
1915 if compressed:
1917 if compressed:
1916 # The revlog compressor added the header in the returned data.
1918 # The revlog compressor added the header in the returned data.
1917 return '', compressed
1919 return '', compressed
1918
1920
1919 if data[0:1] == '\0':
1921 if data[0:1] == '\0':
1920 return '', data
1922 return '', data
1921 return 'u', data
1923 return 'u', data
1922
1924
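# Sketch of the header convention shared by compress() and decompress();
# `rl` stands for any revlog instance.
header, packed = rl.compress(b'some revision data')
chunk = header + packed   # an empty header means the engine header (or a
                          # leading '\0') is already embedded in the data;
                          # 'u' marks data stored uncompressed
restored = rl.decompress(chunk)   # routed on the first byte of the chunk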
1923 def decompress(self, data):
1925 def decompress(self, data):
1924 """Decompress a revlog chunk.
1926 """Decompress a revlog chunk.
1925
1927
1926 The chunk is expected to begin with a header identifying the
1928 The chunk is expected to begin with a header identifying the
1927 format type so it can be routed to an appropriate decompressor.
1929 format type so it can be routed to an appropriate decompressor.
1928 """
1930 """
1929 if not data:
1931 if not data:
1930 return data
1932 return data
1931
1933
1932 # Revlogs are read much more frequently than they are written and many
1934 # Revlogs are read much more frequently than they are written and many
1933 # chunks only take microseconds to decompress, so performance is
1935 # chunks only take microseconds to decompress, so performance is
1934 # important here.
1936 # important here.
1935 #
1937 #
1936 # We can make a few assumptions about revlogs:
1938 # We can make a few assumptions about revlogs:
1937 #
1939 #
1938 # 1) the majority of chunks will be compressed (as opposed to inline
1940 # 1) the majority of chunks will be compressed (as opposed to inline
1939 # raw data).
1941 # raw data).
1940 # 2) decompressing *any* data will likely be at least 10x slower than
1942 # 2) decompressing *any* data will likely be at least 10x slower than
1941 # returning raw inline data.
1943 # returning raw inline data.
1942 # 3) we want to prioritize common and officially supported compression
1944 # 3) we want to prioritize common and officially supported compression
1943 # engines
1945 # engines
1944 #
1946 #
1945 # It follows that we want to optimize for "decompress compressed data
1947 # It follows that we want to optimize for "decompress compressed data
1946 # when encoded with common and officially supported compression engines"
1948 # when encoded with common and officially supported compression engines"
1947 # case over "raw data" and "data encoded by less common or non-official
1949 # case over "raw data" and "data encoded by less common or non-official
1948 # compression engines." That is why we have the inline lookup first
1950 # compression engines." That is why we have the inline lookup first
1949 # followed by the compengines lookup.
1951 # followed by the compengines lookup.
1950 #
1952 #
1951 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
1953 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
1952 # compressed chunks. And this matters for changelog and manifest reads.
1954 # compressed chunks. And this matters for changelog and manifest reads.
1953 t = data[0:1]
1955 t = data[0:1]
1954
1956
1955 if t == 'x':
1957 if t == 'x':
1956 try:
1958 try:
1957 return _zlibdecompress(data)
1959 return _zlibdecompress(data)
1958 except zlib.error as e:
1960 except zlib.error as e:
1959 raise error.RevlogError(_('revlog decompress error: %s') %
1961 raise error.RevlogError(_('revlog decompress error: %s') %
1960 stringutil.forcebytestr(e))
1962 stringutil.forcebytestr(e))
1961 # '\0' is more common than 'u' so it goes first.
1963 # '\0' is more common than 'u' so it goes first.
1962 elif t == '\0':
1964 elif t == '\0':
1963 return data
1965 return data
1964 elif t == 'u':
1966 elif t == 'u':
1965 return util.buffer(data, 1)
1967 return util.buffer(data, 1)
1966
1968
1967 try:
1969 try:
1968 compressor = self._decompressors[t]
1970 compressor = self._decompressors[t]
1969 except KeyError:
1971 except KeyError:
1970 try:
1972 try:
1971 engine = util.compengines.forrevlogheader(t)
1973 engine = util.compengines.forrevlogheader(t)
1972 compressor = engine.revlogcompressor(self._compengineopts)
1974 compressor = engine.revlogcompressor(self._compengineopts)
1973 self._decompressors[t] = compressor
1975 self._decompressors[t] = compressor
1974 except KeyError:
1976 except KeyError:
1975 raise error.RevlogError(_('unknown compression type %r') % t)
1977 raise error.RevlogError(_('unknown compression type %r') % t)
1976
1978
1977 return compressor.decompress(data)
1979 return compressor.decompress(data)
1978
1980
1979 def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
1981 def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
1980 cachedelta, ifh, dfh, alwayscache=False,
1982 cachedelta, ifh, dfh, alwayscache=False,
1981 deltacomputer=None):
1983 deltacomputer=None):
1982 """internal function to add revisions to the log
1984 """internal function to add revisions to the log
1983
1985
1984 see addrevision for argument descriptions.
1986 see addrevision for argument descriptions.
1985
1987
1986 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
1988 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
1987
1989
1988 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
1990 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
1989 be used.
1991 be used.
1990
1992
1991 invariants:
1993 invariants:
1992 - rawtext is optional (can be None); if not set, cachedelta must be set.
1994 - rawtext is optional (can be None); if not set, cachedelta must be set.
1993 if both are set, they must correspond to each other.
1995 if both are set, they must correspond to each other.
1994 """
1996 """
1995 if node == nullid:
1997 if node == nullid:
1996 raise error.RevlogError(_("%s: attempt to add null revision") %
1998 raise error.RevlogError(_("%s: attempt to add null revision") %
1997 self.indexfile)
1999 self.indexfile)
1998 if node == wdirid or node in wdirfilenodeids:
2000 if node == wdirid or node in wdirfilenodeids:
1999 raise error.RevlogError(_("%s: attempt to add wdir revision") %
2001 raise error.RevlogError(_("%s: attempt to add wdir revision") %
2000 self.indexfile)
2002 self.indexfile)
2001
2003
2002 if self._inline:
2004 if self._inline:
2003 fh = ifh
2005 fh = ifh
2004 else:
2006 else:
2005 fh = dfh
2007 fh = dfh
2006
2008
2007 btext = [rawtext]
2009 btext = [rawtext]
2008
2010
2009 curr = len(self)
2011 curr = len(self)
2010 prev = curr - 1
2012 prev = curr - 1
2011 offset = self.end(prev)
2013 offset = self.end(prev)
2012 p1r, p2r = self.rev(p1), self.rev(p2)
2014 p1r, p2r = self.rev(p1), self.rev(p2)
2013
2015
2014 # full versions are inserted when the needed deltas
2016 # full versions are inserted when the needed deltas
2015 # become comparable to the uncompressed text
2017 # become comparable to the uncompressed text
2016 if rawtext is None:
2018 if rawtext is None:
2017 # need rawtext size, before changed by flag processors, which is
2019 # need rawtext size, before changed by flag processors, which is
2018 # the non-raw size. use revlog explicitly to avoid filelog's extra
2020 # the non-raw size. use revlog explicitly to avoid filelog's extra
2019 # logic that might remove metadata size.
2021 # logic that might remove metadata size.
2020 textlen = mdiff.patchedsize(revlog.size(self, cachedelta[0]),
2022 textlen = mdiff.patchedsize(revlog.size(self, cachedelta[0]),
2021 cachedelta[1])
2023 cachedelta[1])
2022 else:
2024 else:
2023 textlen = len(rawtext)
2025 textlen = len(rawtext)
2024
2026
2025 if deltacomputer is None:
2027 if deltacomputer is None:
2026 deltacomputer = deltautil.deltacomputer(self)
2028 deltacomputer = deltautil.deltacomputer(self)
2027
2029
2028 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2030 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2029
2031
2030 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2032 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2031
2033
2032 e = (offset_type(offset, flags), deltainfo.deltalen, textlen,
2034 e = (offset_type(offset, flags), deltainfo.deltalen, textlen,
2033 deltainfo.base, link, p1r, p2r, node)
2035 deltainfo.base, link, p1r, p2r, node)
2034 self.index.append(e)
2036 self.index.append(e)
2035 self.nodemap[node] = curr
2037 self.nodemap[node] = curr
2036
2038
2037 # Reset the pure node cache start lookup offset to account for new
2039 # Reset the pure node cache start lookup offset to account for new
2038 # revision.
2040 # revision.
2039 if self._nodepos is not None:
2041 if self._nodepos is not None:
2040 self._nodepos = curr
2042 self._nodepos = curr
2041
2043
2042 entry = self._io.packentry(e, self.node, self.version, curr)
2044 entry = self._io.packentry(e, self.node, self.version, curr)
2043 self._writeentry(transaction, ifh, dfh, entry, deltainfo.data,
2045 self._writeentry(transaction, ifh, dfh, entry, deltainfo.data,
2044 link, offset)
2046 link, offset)
2045
2047
2046 rawtext = btext[0]
2048 rawtext = btext[0]
2047
2049
2048 if alwayscache and rawtext is None:
2050 if alwayscache and rawtext is None:
2049 rawtext = deltacomputer.buildtext(revinfo, fh)
2051 rawtext = deltacomputer.buildtext(revinfo, fh)
2050
2052
2051 if type(rawtext) == bytes: # only accept immutable objects
2053 if type(rawtext) == bytes: # only accept immutable objects
2052 self._revisioncache = (node, curr, rawtext)
2054 self._revisioncache = (node, curr, rawtext)
2053 self._chainbasecache[curr] = deltainfo.chainbase
2055 self._chainbasecache[curr] = deltainfo.chainbase
2054 return node
2056 return node
2055
2057
2056 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
2058 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
2057 # Files opened in a+ mode have inconsistent behavior on various
2059 # Files opened in a+ mode have inconsistent behavior on various
2058 # platforms. Windows requires that a file positioning call be made
2060 # platforms. Windows requires that a file positioning call be made
2059 # when the file handle transitions between reads and writes. See
2061 # when the file handle transitions between reads and writes. See
2060 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2062 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2061 # platforms, Python or the platform itself can be buggy. Some versions
2063 # platforms, Python or the platform itself can be buggy. Some versions
2062 # of Solaris have been observed to not append at the end of the file
2064 # of Solaris have been observed to not append at the end of the file
2063 # if the file was seeked to before the end. See issue4943 for more.
2065 # if the file was seeked to before the end. See issue4943 for more.
2064 #
2066 #
2065 # We work around this issue by inserting a seek() before writing.
2067 # We work around this issue by inserting a seek() before writing.
2066 # Note: This is likely not necessary on Python 3. However, because
2068 # Note: This is likely not necessary on Python 3. However, because
2067 # the file handle is reused for reads and may be seeked there, we need
2069 # the file handle is reused for reads and may be seeked there, we need
2068 # to be careful before changing this.
2070 # to be careful before changing this.
2069 ifh.seek(0, os.SEEK_END)
2071 ifh.seek(0, os.SEEK_END)
2070 if dfh:
2072 if dfh:
2071 dfh.seek(0, os.SEEK_END)
2073 dfh.seek(0, os.SEEK_END)
2072
2074
2073 curr = len(self) - 1
2075 curr = len(self) - 1
2074 if not self._inline:
2076 if not self._inline:
2075 transaction.add(self.datafile, offset)
2077 transaction.add(self.datafile, offset)
2076 transaction.add(self.indexfile, curr * len(entry))
2078 transaction.add(self.indexfile, curr * len(entry))
2077 if data[0]:
2079 if data[0]:
2078 dfh.write(data[0])
2080 dfh.write(data[0])
2079 dfh.write(data[1])
2081 dfh.write(data[1])
2080 ifh.write(entry)
2082 ifh.write(entry)
2081 else:
2083 else:
2082 offset += curr * self._io.size
2084 offset += curr * self._io.size
2083 transaction.add(self.indexfile, offset, curr)
2085 transaction.add(self.indexfile, offset, curr)
2084 ifh.write(entry)
2086 ifh.write(entry)
2085 ifh.write(data[0])
2087 ifh.write(data[0])
2086 ifh.write(data[1])
2088 ifh.write(data[1])
2087 self._enforceinlinesize(transaction, ifh)
2089 self._enforceinlinesize(transaction, ifh)
2088
2090
2089 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
2091 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
2090 """
2092 """
2091 add a delta group
2093 add a delta group
2092
2094
2093 given a set of deltas, add them to the revision log. the
2095 given a set of deltas, add them to the revision log. the
2094 first delta is against its parent, which should be in our
2096 first delta is against its parent, which should be in our
2095 log, the rest are against the previous delta.
2097 log, the rest are against the previous delta.
2096
2098
2097 If ``addrevisioncb`` is defined, it will be called with arguments of
2099 If ``addrevisioncb`` is defined, it will be called with arguments of
2098 this revlog and the node that was added.
2100 this revlog and the node that was added.
2099 """
2101 """
2100
2102
2101 if self._writinghandles:
2103 if self._writinghandles:
2102 raise error.ProgrammingError('cannot nest addgroup() calls')
2104 raise error.ProgrammingError('cannot nest addgroup() calls')
2103
2105
2104 nodes = []
2106 nodes = []
2105
2107
2106 r = len(self)
2108 r = len(self)
2107 end = 0
2109 end = 0
2108 if r:
2110 if r:
2109 end = self.end(r - 1)
2111 end = self.end(r - 1)
2110 ifh = self._indexfp("a+")
2112 ifh = self._indexfp("a+")
2111 isize = r * self._io.size
2113 isize = r * self._io.size
2112 if self._inline:
2114 if self._inline:
2113 transaction.add(self.indexfile, end + isize, r)
2115 transaction.add(self.indexfile, end + isize, r)
2114 dfh = None
2116 dfh = None
2115 else:
2117 else:
2116 transaction.add(self.indexfile, isize, r)
2118 transaction.add(self.indexfile, isize, r)
2117 transaction.add(self.datafile, end)
2119 transaction.add(self.datafile, end)
2118 dfh = self._datafp("a+")
2120 dfh = self._datafp("a+")
2119 def flush():
2121 def flush():
2120 if dfh:
2122 if dfh:
2121 dfh.flush()
2123 dfh.flush()
2122 ifh.flush()
2124 ifh.flush()
2123
2125
2124 self._writinghandles = (ifh, dfh)
2126 self._writinghandles = (ifh, dfh)
2125
2127
2126 try:
2128 try:
2127 deltacomputer = deltautil.deltacomputer(self)
2129 deltacomputer = deltautil.deltacomputer(self)
2128 # loop through our set of deltas
2130 # loop through our set of deltas
2129 for data in deltas:
2131 for data in deltas:
2130 node, p1, p2, linknode, deltabase, delta, flags = data
2132 node, p1, p2, linknode, deltabase, delta, flags = data
2131 link = linkmapper(linknode)
2133 link = linkmapper(linknode)
2132 flags = flags or REVIDX_DEFAULT_FLAGS
2134 flags = flags or REVIDX_DEFAULT_FLAGS
2133
2135
2134 nodes.append(node)
2136 nodes.append(node)
2135
2137
2136 if node in self.nodemap:
2138 if node in self.nodemap:
2137 self._nodeduplicatecallback(transaction, node)
2139 self._nodeduplicatecallback(transaction, node)
2138 # this can happen if two branches make the same change
2140 # this can happen if two branches make the same change
2139 continue
2141 continue
2140
2142
2141 for p in (p1, p2):
2143 for p in (p1, p2):
2142 if p not in self.nodemap:
2144 if p not in self.nodemap:
2143 raise error.LookupError(p, self.indexfile,
2145 raise error.LookupError(p, self.indexfile,
2144 _('unknown parent'))
2146 _('unknown parent'))
2145
2147
2146 if deltabase not in self.nodemap:
2148 if deltabase not in self.nodemap:
2147 raise error.LookupError(deltabase, self.indexfile,
2149 raise error.LookupError(deltabase, self.indexfile,
2148 _('unknown delta base'))
2150 _('unknown delta base'))
2149
2151
2150 baserev = self.rev(deltabase)
2152 baserev = self.rev(deltabase)
2151
2153
2152 if baserev != nullrev and self.iscensored(baserev):
2154 if baserev != nullrev and self.iscensored(baserev):
2153 # if base is censored, delta must be full replacement in a
2155 # if base is censored, delta must be full replacement in a
2154 # single patch operation
2156 # single patch operation
2155 hlen = struct.calcsize(">lll")
2157 hlen = struct.calcsize(">lll")
2156 oldlen = self.rawsize(baserev)
2158 oldlen = self.rawsize(baserev)
2157 newlen = len(delta) - hlen
2159 newlen = len(delta) - hlen
2158 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2160 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2159 raise error.CensoredBaseError(self.indexfile,
2161 raise error.CensoredBaseError(self.indexfile,
2160 self.node(baserev))
2162 self.node(baserev))
2161
2163
2162 if not flags and self._peek_iscensored(baserev, delta, flush):
2164 if not flags and self._peek_iscensored(baserev, delta, flush):
2163 flags |= REVIDX_ISCENSORED
2165 flags |= REVIDX_ISCENSORED
2164
2166
2165 # We assume consumers of addrevisioncb will want to retrieve
2167 # We assume consumers of addrevisioncb will want to retrieve
2166 # the added revision, which will require a call to
2168 # the added revision, which will require a call to
2167 # revision(). revision() will fast path if there is a cache
2169 # revision(). revision() will fast path if there is a cache
2168 # hit. So, we tell _addrevision() to always cache in this case.
2170 # hit. So, we tell _addrevision() to always cache in this case.
2169 # We're only using addgroup() in the context of changegroup
2171 # We're only using addgroup() in the context of changegroup
2170 # generation so the revision data can always be handled as raw
2172 # generation so the revision data can always be handled as raw
2171 # by the flagprocessor.
2173 # by the flagprocessor.
2172 self._addrevision(node, None, transaction, link,
2174 self._addrevision(node, None, transaction, link,
2173 p1, p2, flags, (baserev, delta),
2175 p1, p2, flags, (baserev, delta),
2174 ifh, dfh,
2176 ifh, dfh,
2175 alwayscache=bool(addrevisioncb),
2177 alwayscache=bool(addrevisioncb),
2176 deltacomputer=deltacomputer)
2178 deltacomputer=deltacomputer)
2177
2179
2178 if addrevisioncb:
2180 if addrevisioncb:
2179 addrevisioncb(self, node)
2181 addrevisioncb(self, node)
2180
2182
2181 if not dfh and not self._inline:
2183 if not dfh and not self._inline:
2182 # addrevision switched from inline to conventional
2184 # addrevision switched from inline to conventional
2183 # reopen the index
2185 # reopen the index
2184 ifh.close()
2186 ifh.close()
2185 dfh = self._datafp("a+")
2187 dfh = self._datafp("a+")
2186 ifh = self._indexfp("a+")
2188 ifh = self._indexfp("a+")
2187 self._writinghandles = (ifh, dfh)
2189 self._writinghandles = (ifh, dfh)
2188 finally:
2190 finally:
2189 self._writinghandles = None
2191 self._writinghandles = None
2190
2192
2191 if dfh:
2193 if dfh:
2192 dfh.close()
2194 dfh.close()
2193 ifh.close()
2195 ifh.close()
2194
2196
2195 return nodes
2197 return nodes
2196
2198
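# Shape of a single entry in the `deltas` iterable consumed by addgroup(),
# matching the unpacking above; the values here are placeholders only.
delta_entry = (
    b'\1' * 20,   # node of the revision being added
    b'\0' * 20,   # p1
    b'\0' * 20,   # p2
    b'\2' * 20,   # linknode, resolved to a linkrev through `linkmapper`
    b'\0' * 20,   # deltabase the delta applies against
    b'',          # the binary delta itself
    0,            # flags; 0 falls back to REVIDX_DEFAULT_FLAGS
)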
2197 def iscensored(self, rev):
2199 def iscensored(self, rev):
2198 """Check if a file revision is censored."""
2200 """Check if a file revision is censored."""
2199 if not self._censorable:
2201 if not self._censorable:
2200 return False
2202 return False
2201
2203
2202 return self.flags(rev) & REVIDX_ISCENSORED
2204 return self.flags(rev) & REVIDX_ISCENSORED
2203
2205
2204 def _peek_iscensored(self, baserev, delta, flush):
2206 def _peek_iscensored(self, baserev, delta, flush):
2205 """Quickly check if a delta produces a censored revision."""
2207 """Quickly check if a delta produces a censored revision."""
2206 if not self._censorable:
2208 if not self._censorable:
2207 return False
2209 return False
2208
2210
2209 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2211 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2210
2212
2211 def getstrippoint(self, minlink):
2213 def getstrippoint(self, minlink):
2212 """find the minimum rev that must be stripped to strip the linkrev
2214 """find the minimum rev that must be stripped to strip the linkrev
2213
2215
2214 Returns a tuple containing the minimum rev and a set of all revs that
2216 Returns a tuple containing the minimum rev and a set of all revs that
2215 have linkrevs that will be broken by this strip.
2217 have linkrevs that will be broken by this strip.
2216 """
2218 """
2217 return storageutil.resolvestripinfo(minlink, len(self) - 1,
2219 return storageutil.resolvestripinfo(minlink, len(self) - 1,
2218 self.headrevs(),
2220 self.headrevs(),
2219 self.linkrev, self.parentrevs)
2221 self.linkrev, self.parentrevs)
2220
2222
2221 def strip(self, minlink, transaction):
2223 def strip(self, minlink, transaction):
2222 """truncate the revlog on the first revision with a linkrev >= minlink
2224 """truncate the revlog on the first revision with a linkrev >= minlink
2223
2225
2224 This function is called when we're stripping revision minlink and
2226 This function is called when we're stripping revision minlink and
2225 its descendants from the repository.
2227 its descendants from the repository.
2226
2228
2227 We have to remove all revisions with linkrev >= minlink, because
2229 We have to remove all revisions with linkrev >= minlink, because
2228 the equivalent changelog revisions will be renumbered after the
2230 the equivalent changelog revisions will be renumbered after the
2229 strip.
2231 strip.
2230
2232
2231 So we truncate the revlog on the first of these revisions, and
2233 So we truncate the revlog on the first of these revisions, and
2232 trust that the caller has saved the revisions that shouldn't be
2234 trust that the caller has saved the revisions that shouldn't be
2233 removed and that it'll re-add them after this truncation.
2235 removed and that it'll re-add them after this truncation.
2234 """
2236 """
2235 if len(self) == 0:
2237 if len(self) == 0:
2236 return
2238 return
2237
2239
2238 rev, _ = self.getstrippoint(minlink)
2240 rev, _ = self.getstrippoint(minlink)
2239 if rev == len(self):
2241 if rev == len(self):
2240 return
2242 return
2241
2243
2242 # first truncate the files on disk
2244 # first truncate the files on disk
2243 end = self.start(rev)
2245 end = self.start(rev)
2244 if not self._inline:
2246 if not self._inline:
2245 transaction.add(self.datafile, end)
2247 transaction.add(self.datafile, end)
2246 end = rev * self._io.size
2248 end = rev * self._io.size
2247 else:
2249 else:
2248 end += rev * self._io.size
2250 end += rev * self._io.size
2249
2251
2250 transaction.add(self.indexfile, end)
2252 transaction.add(self.indexfile, end)
2251
2253
2252 # then reset internal state in memory to forget those revisions
2254 # then reset internal state in memory to forget those revisions
2253 self._revisioncache = None
2255 self._revisioncache = None
2254 self._chaininfocache = {}
2256 self._chaininfocache = {}
2255 self._chunkclear()
2257 self._chunkclear()
2256 for x in pycompat.xrange(rev, len(self)):
2258 for x in pycompat.xrange(rev, len(self)):
2257 del self.nodemap[self.node(x)]
2259 del self.nodemap[self.node(x)]
2258
2260
2259 del self.index[rev:-1]
2261 del self.index[rev:-1]
2260 self._nodepos = None
2262 self._nodepos = None
2261
2263
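# Sketch of the usual pairing of getstrippoint() and strip(); `rl`, `tr`
# and `minlink` are assumed to exist at the call site.
striprev, brokenrevs = rl.getstrippoint(minlink)
if striprev < len(rl):
    rl.strip(minlink, tr)   # truncates striprev and everything after it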
2262 def checksize(self):
2264 def checksize(self):
2263 """Check size of index and data files
2265 """Check size of index and data files
2264
2266
2265 return a (dd, di) tuple.
2267 return a (dd, di) tuple.
2266 - dd: extra bytes for the "data" file
2268 - dd: extra bytes for the "data" file
2267 - di: extra bytes for the "index" file
2269 - di: extra bytes for the "index" file
2268
2270
2269 A healthy revlog will return (0, 0).
2271 A healthy revlog will return (0, 0).
2270 """
2272 """
2271 expected = 0
2273 expected = 0
2272 if len(self):
2274 if len(self):
2273 expected = max(0, self.end(len(self) - 1))
2275 expected = max(0, self.end(len(self) - 1))
2274
2276
2275 try:
2277 try:
2276 with self._datafp() as f:
2278 with self._datafp() as f:
2277 f.seek(0, io.SEEK_END)
2279 f.seek(0, io.SEEK_END)
2278 actual = f.tell()
2280 actual = f.tell()
2279 dd = actual - expected
2281 dd = actual - expected
2280 except IOError as inst:
2282 except IOError as inst:
2281 if inst.errno != errno.ENOENT:
2283 if inst.errno != errno.ENOENT:
2282 raise
2284 raise
2283 dd = 0
2285 dd = 0
2284
2286
2285 try:
2287 try:
2286 f = self.opener(self.indexfile)
2288 f = self.opener(self.indexfile)
2287 f.seek(0, io.SEEK_END)
2289 f.seek(0, io.SEEK_END)
2288 actual = f.tell()
2290 actual = f.tell()
2289 f.close()
2291 f.close()
2290 s = self._io.size
2292 s = self._io.size
2291 i = max(0, actual // s)
2293 i = max(0, actual // s)
2292 di = actual - (i * s)
2294 di = actual - (i * s)
2293 if self._inline:
2295 if self._inline:
2294 databytes = 0
2296 databytes = 0
2295 for r in self:
2297 for r in self:
2296 databytes += max(0, self.length(r))
2298 databytes += max(0, self.length(r))
2297 dd = 0
2299 dd = 0
2298 di = actual - len(self) * s - databytes
2300 di = actual - len(self) * s - databytes
2299 except IOError as inst:
2301 except IOError as inst:
2300 if inst.errno != errno.ENOENT:
2302 if inst.errno != errno.ENOENT:
2301 raise
2303 raise
2302 di = 0
2304 di = 0
2303
2305
2304 return (dd, di)
2306 return (dd, di)
2305
2307
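# Example of interpreting the checksize() result; `rl` stands for a revlog.
dd, di = rl.checksize()
if dd or di:
    print('found %d stray data bytes and %d stray index bytes' % (dd, di))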
2306 def files(self):
2308 def files(self):
2307 res = [self.indexfile]
2309 res = [self.indexfile]
2308 if not self._inline:
2310 if not self._inline:
2309 res.append(self.datafile)
2311 res.append(self.datafile)
2310 return res
2312 return res
2311
2313
2312 def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
2314 def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
2313 assumehaveparentrevisions=False,
2315 assumehaveparentrevisions=False,
2314 deltamode=repository.CG_DELTAMODE_STD):
2316 deltamode=repository.CG_DELTAMODE_STD):
2315 if nodesorder not in ('nodes', 'storage', 'linear', None):
2317 if nodesorder not in ('nodes', 'storage', 'linear', None):
2316 raise error.ProgrammingError('unhandled value for nodesorder: %s' %
2318 raise error.ProgrammingError('unhandled value for nodesorder: %s' %
2317 nodesorder)
2319 nodesorder)
2318
2320
2319 if nodesorder is None and not self._generaldelta:
2321 if nodesorder is None and not self._generaldelta:
2320 nodesorder = 'storage'
2322 nodesorder = 'storage'
2321
2323
2322 if (not self._storedeltachains and
2324 if (not self._storedeltachains and
2323 deltamode != repository.CG_DELTAMODE_PREV):
2325 deltamode != repository.CG_DELTAMODE_PREV):
2324 deltamode = repository.CG_DELTAMODE_FULL
2326 deltamode = repository.CG_DELTAMODE_FULL
2325
2327
2326 return storageutil.emitrevisions(
2328 return storageutil.emitrevisions(
2327 self, nodes, nodesorder, revlogrevisiondelta,
2329 self, nodes, nodesorder, revlogrevisiondelta,
2328 deltaparentfn=self.deltaparent,
2330 deltaparentfn=self.deltaparent,
2329 candeltafn=self.candelta,
2331 candeltafn=self.candelta,
2330 rawsizefn=self.rawsize,
2332 rawsizefn=self.rawsize,
2331 revdifffn=self.revdiff,
2333 revdifffn=self.revdiff,
2332 flagsfn=self.flags,
2334 flagsfn=self.flags,
2333 deltamode=deltamode,
2335 deltamode=deltamode,
2334 revisiondata=revisiondata,
2336 revisiondata=revisiondata,
2335 assumehaveparentrevisions=assumehaveparentrevisions)
2337 assumehaveparentrevisions=assumehaveparentrevisions)
2336
2338
2337 DELTAREUSEALWAYS = 'always'
2339 DELTAREUSEALWAYS = 'always'
2338 DELTAREUSESAMEREVS = 'samerevs'
2340 DELTAREUSESAMEREVS = 'samerevs'
2339 DELTAREUSENEVER = 'never'
2341 DELTAREUSENEVER = 'never'
2340
2342
2341 DELTAREUSEFULLADD = 'fulladd'
2343 DELTAREUSEFULLADD = 'fulladd'
2342
2344
2343 DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'}
2345 DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'}
2344
2346
2345 def clone(self, tr, destrevlog, addrevisioncb=None,
2347 def clone(self, tr, destrevlog, addrevisioncb=None,
2346 deltareuse=DELTAREUSESAMEREVS, forcedeltabothparents=None):
2348 deltareuse=DELTAREUSESAMEREVS, forcedeltabothparents=None):
2347 """Copy this revlog to another, possibly with format changes.
2349 """Copy this revlog to another, possibly with format changes.
2348
2350
2349 The destination revlog will contain the same revisions and nodes.
2351 The destination revlog will contain the same revisions and nodes.
2350 However, it may not be bit-for-bit identical due to e.g. delta encoding
2352 However, it may not be bit-for-bit identical due to e.g. delta encoding
2351 differences.
2353 differences.
2352
2354
2355 The ``deltareuse`` argument controls how deltas from the existing revlog
2357 The ``deltareuse`` argument controls how deltas from the existing revlog
2354 are preserved in the destination revlog. The argument can have the
2356 are preserved in the destination revlog. The argument can have the
2355 following values:
2357 following values:
2356
2358
2357 DELTAREUSEALWAYS
2359 DELTAREUSEALWAYS
2358 Deltas will always be reused (if possible), even if the destination
2360 Deltas will always be reused (if possible), even if the destination
2359 revlog would not select the same revisions for the delta. This is the
2361 revlog would not select the same revisions for the delta. This is the
2360 fastest mode of operation.
2362 fastest mode of operation.
2361 DELTAREUSESAMEREVS
2363 DELTAREUSESAMEREVS
2362 Deltas will be reused if the destination revlog would pick the same
2364 Deltas will be reused if the destination revlog would pick the same
2363 revisions for the delta. This mode strikes a balance between speed
2365 revisions for the delta. This mode strikes a balance between speed
2364 and optimization.
2366 and optimization.
2365 DELTAREUSENEVER
2367 DELTAREUSENEVER
2366 Deltas will never be reused. This is the slowest mode of execution.
2368 Deltas will never be reused. This is the slowest mode of execution.
2367 This mode can be used to recompute deltas (e.g. if the diff/delta
2369 This mode can be used to recompute deltas (e.g. if the diff/delta
2368 algorithm changes).
2370 algorithm changes).
2369
2371
2370 Delta computation can be slow, so the choice of delta reuse policy can
2372 Delta computation can be slow, so the choice of delta reuse policy can
2371 significantly affect run time.
2373 significantly affect run time.
2372
2374
2373 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2375 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2374 two extremes. Deltas will be reused if they are appropriate. But if the
2376 two extremes. Deltas will be reused if they are appropriate. But if the
2375 delta could choose a better revision, it will do so. This means if you
2377 delta could choose a better revision, it will do so. This means if you
2376 are converting a non-generaldelta revlog to a generaldelta revlog,
2378 are converting a non-generaldelta revlog to a generaldelta revlog,
2377 deltas will be recomputed if the delta's parent isn't a parent of the
2379 deltas will be recomputed if the delta's parent isn't a parent of the
2378 revision.
2380 revision.
2379
2381
2380 In addition to the delta policy, the ``forcedeltabothparents``
2382 In addition to the delta policy, the ``forcedeltabothparents``
2381 argument controls whether to force computing deltas against both parents
2383 argument controls whether to force computing deltas against both parents
2382 for merges. If it is not set, the destination revlog's existing setting is kept.
2384 for merges. If it is not set, the destination revlog's existing setting is kept.
2383 """
2385 """
2384 if deltareuse not in self.DELTAREUSEALL:
2386 if deltareuse not in self.DELTAREUSEALL:
2385 raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)
2387 raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)
2386
2388
2387 if len(destrevlog):
2389 if len(destrevlog):
2388 raise ValueError(_('destination revlog is not empty'))
2390 raise ValueError(_('destination revlog is not empty'))
2389
2391
2390 if getattr(self, 'filteredrevs', None):
2392 if getattr(self, 'filteredrevs', None):
2391 raise ValueError(_('source revlog has filtered revisions'))
2393 raise ValueError(_('source revlog has filtered revisions'))
2392 if getattr(destrevlog, 'filteredrevs', None):
2394 if getattr(destrevlog, 'filteredrevs', None):
2393 raise ValueError(_('destination revlog has filtered revisions'))
2395 raise ValueError(_('destination revlog has filtered revisions'))
2394
2396
2397 # lazydelta and lazydeltabase control whether to reuse a cached delta,
2399 # lazydelta and lazydeltabase control whether to reuse a cached delta,
2396 # if possible.
2398 # if possible.
2397 oldlazydelta = destrevlog._lazydelta
2399 oldlazydelta = destrevlog._lazydelta
2398 oldlazydeltabase = destrevlog._lazydeltabase
2400 oldlazydeltabase = destrevlog._lazydeltabase
2399 oldamd = destrevlog._deltabothparents
2401 oldamd = destrevlog._deltabothparents
2400
2402
2401 try:
2403 try:
2402 if deltareuse == self.DELTAREUSEALWAYS:
2404 if deltareuse == self.DELTAREUSEALWAYS:
2403 destrevlog._lazydeltabase = True
2405 destrevlog._lazydeltabase = True
2404 destrevlog._lazydelta = True
2406 destrevlog._lazydelta = True
2405 elif deltareuse == self.DELTAREUSESAMEREVS:
2407 elif deltareuse == self.DELTAREUSESAMEREVS:
2406 destrevlog._lazydeltabase = False
2408 destrevlog._lazydeltabase = False
2407 destrevlog._lazydelta = True
2409 destrevlog._lazydelta = True
2408 elif deltareuse == self.DELTAREUSENEVER:
2410 elif deltareuse == self.DELTAREUSENEVER:
2409 destrevlog._lazydeltabase = False
2411 destrevlog._lazydeltabase = False
2410 destrevlog._lazydelta = False
2412 destrevlog._lazydelta = False
2411
2413
2412 destrevlog._deltabothparents = forcedeltabothparents or oldamd
2414 destrevlog._deltabothparents = forcedeltabothparents or oldamd
2413
2415
2414 deltacomputer = deltautil.deltacomputer(destrevlog)
2416 deltacomputer = deltautil.deltacomputer(destrevlog)
2415 index = self.index
2417 index = self.index
2416 for rev in self:
2418 for rev in self:
2417 entry = index[rev]
2419 entry = index[rev]
2418
2420
2419 # Some classes override linkrev to take filtered revs into
2421 # Some classes override linkrev to take filtered revs into
2420 # account. Use raw entry from index.
2422 # account. Use raw entry from index.
2421 flags = entry[0] & 0xffff
2423 flags = entry[0] & 0xffff
2422 linkrev = entry[4]
2424 linkrev = entry[4]
2423 p1 = index[entry[5]][7]
2425 p1 = index[entry[5]][7]
2424 p2 = index[entry[6]][7]
2426 p2 = index[entry[6]][7]
2425 node = entry[7]
2427 node = entry[7]
2426
2428
2427 # (Possibly) reuse the delta from the revlog if allowed and
2429 # (Possibly) reuse the delta from the revlog if allowed and
2428 # the revlog chunk is a delta.
2430 # the revlog chunk is a delta.
2429 cachedelta = None
2431 cachedelta = None
2430 rawtext = None
2432 rawtext = None
2431 if (deltareuse != self.DELTAREUSEFULLADD
2433 if (deltareuse != self.DELTAREUSEFULLADD
2432 and destrevlog._lazydelta):
2434 and destrevlog._lazydelta):
2433 dp = self.deltaparent(rev)
2435 dp = self.deltaparent(rev)
2434 if dp != nullrev:
2436 if dp != nullrev:
2435 cachedelta = (dp, bytes(self._chunk(rev)))
2437 cachedelta = (dp, bytes(self._chunk(rev)))
2436
2438
2437 if not cachedelta:
2439 if not cachedelta:
2438 rawtext = self.rawdata(rev)
2440 rawtext = self.rawdata(rev)
2439
2441
2440
2442
2441 if deltareuse == self.DELTAREUSEFULLADD:
2443 if deltareuse == self.DELTAREUSEFULLADD:
2442 destrevlog.addrevision(rawtext, tr, linkrev, p1, p2,
2444 destrevlog.addrevision(rawtext, tr, linkrev, p1, p2,
2443 cachedelta=cachedelta,
2445 cachedelta=cachedelta,
2444 node=node, flags=flags,
2446 node=node, flags=flags,
2445 deltacomputer=deltacomputer)
2447 deltacomputer=deltacomputer)
2446 else:
2448 else:
2447 ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
2449 ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
2448 checkambig=False)
2450 checkambig=False)
2449 dfh = None
2451 dfh = None
2450 if not destrevlog._inline:
2452 if not destrevlog._inline:
2451 dfh = destrevlog.opener(destrevlog.datafile, 'a+')
2453 dfh = destrevlog.opener(destrevlog.datafile, 'a+')
2452 try:
2454 try:
2453 destrevlog._addrevision(node, rawtext, tr, linkrev, p1,
2455 destrevlog._addrevision(node, rawtext, tr, linkrev, p1,
2454 p2, flags, cachedelta, ifh, dfh,
2456 p2, flags, cachedelta, ifh, dfh,
2455 deltacomputer=deltacomputer)
2457 deltacomputer=deltacomputer)
2456 finally:
2458 finally:
2457 if dfh:
2459 if dfh:
2458 dfh.close()
2460 dfh.close()
2459 ifh.close()
2461 ifh.close()
2460
2462
2461 if addrevisioncb:
2463 if addrevisioncb:
2462 addrevisioncb(self, rev, node)
2464 addrevisioncb(self, rev, node)
2463 finally:
2465 finally:
2464 destrevlog._lazydelta = oldlazydelta
2466 destrevlog._lazydelta = oldlazydelta
2465 destrevlog._lazydeltabase = oldlazydeltabase
2467 destrevlog._lazydeltabase = oldlazydeltabase
2466 destrevlog._deltabothparents = oldamd
2468 destrevlog._deltabothparents = oldamd
2467
2469
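# Hypothetical invocation of clone() during a storage upgrade; `srcrl`,
# `destrl` and `tr` are assumed to exist. Use DELTAREUSENEVER instead to
# recompute every delta with the destination's settings.
srcrl.clone(tr, destrl, deltareuse=srcrl.DELTAREUSESAMEREVS)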
2468 def censorrevision(self, tr, censornode, tombstone=b''):
2470 def censorrevision(self, tr, censornode, tombstone=b''):
2469 if (self.version & 0xFFFF) == REVLOGV0:
2471 if (self.version & 0xFFFF) == REVLOGV0:
2470 raise error.RevlogError(_('cannot censor with version %d revlogs') %
2472 raise error.RevlogError(_('cannot censor with version %d revlogs') %
2471 self.version)
2473 self.version)
2472
2474
2473 censorrev = self.rev(censornode)
2475 censorrev = self.rev(censornode)
2474 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2476 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2475
2477
2476 if len(tombstone) > self.rawsize(censorrev):
2478 if len(tombstone) > self.rawsize(censorrev):
2477 raise error.Abort(_('censor tombstone must be no longer than '
2479 raise error.Abort(_('censor tombstone must be no longer than '
2478 'censored data'))
2480 'censored data'))
2479
2481
2480 # Rewriting the revlog in place is hard. Our strategy for censoring is
2482 # Rewriting the revlog in place is hard. Our strategy for censoring is
2481 # to create a new revlog, copy all revisions to it, then replace the
2483 # to create a new revlog, copy all revisions to it, then replace the
2482 # revlogs on transaction close.
2484 # revlogs on transaction close.
2483
2485
2484 newindexfile = self.indexfile + b'.tmpcensored'
2486 newindexfile = self.indexfile + b'.tmpcensored'
2485 newdatafile = self.datafile + b'.tmpcensored'
2487 newdatafile = self.datafile + b'.tmpcensored'
2486
2488
2487 # This is a bit dangerous. We could easily have a mismatch of state.
2489 # This is a bit dangerous. We could easily have a mismatch of state.
2488 newrl = revlog(self.opener, newindexfile, newdatafile,
2490 newrl = revlog(self.opener, newindexfile, newdatafile,
2489 censorable=True)
2491 censorable=True)
2490 newrl.version = self.version
2492 newrl.version = self.version
2491 newrl._generaldelta = self._generaldelta
2493 newrl._generaldelta = self._generaldelta
2492 newrl._io = self._io
2494 newrl._io = self._io
2493
2495
2494 for rev in self.revs():
2496 for rev in self.revs():
2495 node = self.node(rev)
2497 node = self.node(rev)
2496 p1, p2 = self.parents(node)
2498 p1, p2 = self.parents(node)
2497
2499
2498 if rev == censorrev:
2500 if rev == censorrev:
2499 newrl.addrawrevision(tombstone, tr, self.linkrev(censorrev),
2501 newrl.addrawrevision(tombstone, tr, self.linkrev(censorrev),
2500 p1, p2, censornode, REVIDX_ISCENSORED)
2502 p1, p2, censornode, REVIDX_ISCENSORED)
2501
2503
2502 if newrl.deltaparent(rev) != nullrev:
2504 if newrl.deltaparent(rev) != nullrev:
2503 raise error.Abort(_('censored revision stored as delta; '
2505 raise error.Abort(_('censored revision stored as delta; '
2504 'cannot censor'),
2506 'cannot censor'),
2505 hint=_('censoring of revlogs is not '
2507 hint=_('censoring of revlogs is not '
2506 'fully implemented; please report '
2508 'fully implemented; please report '
2507 'this bug'))
2509 'this bug'))
2508 continue
2510 continue
2509
2511
2510 if self.iscensored(rev):
2512 if self.iscensored(rev):
2511 if self.deltaparent(rev) != nullrev:
2513 if self.deltaparent(rev) != nullrev:
2512 raise error.Abort(_('cannot censor due to censored '
2514 raise error.Abort(_('cannot censor due to censored '
2513 'revision having delta stored'))
2515 'revision having delta stored'))
2514 rawtext = self._chunk(rev)
2516 rawtext = self._chunk(rev)
2515 else:
2517 else:
2516 rawtext = self.rawdata(rev)
2518 rawtext = self.rawdata(rev)
2517
2519
2518 newrl.addrawrevision(rawtext, tr, self.linkrev(rev), p1, p2, node,
2520 newrl.addrawrevision(rawtext, tr, self.linkrev(rev), p1, p2, node,
2519 self.flags(rev))
2521 self.flags(rev))
2520
2522
2521 tr.addbackup(self.indexfile, location='store')
2523 tr.addbackup(self.indexfile, location='store')
2522 if not self._inline:
2524 if not self._inline:
2523 tr.addbackup(self.datafile, location='store')
2525 tr.addbackup(self.datafile, location='store')
2524
2526
2525 self.opener.rename(newrl.indexfile, self.indexfile)
2527 self.opener.rename(newrl.indexfile, self.indexfile)
2526 if not self._inline:
2528 if not self._inline:
2527 self.opener.rename(newrl.datafile, self.datafile)
2529 self.opener.rename(newrl.datafile, self.datafile)
2528
2530
2529 self.clearcaches()
2531 self.clearcaches()
2530 self._loadindex()
2532 self._loadindex()
2531
2533
2532 def verifyintegrity(self, state):
2534 def verifyintegrity(self, state):
2533 """Verifies the integrity of the revlog.
2535 """Verifies the integrity of the revlog.
2534
2536
2535 Yields ``revlogproblem`` instances describing problems that are
2537 Yields ``revlogproblem`` instances describing problems that are
2536 found.
2538 found.
2537 """
2539 """
2538 dd, di = self.checksize()
2540 dd, di = self.checksize()
2539 if dd:
2541 if dd:
2540 yield revlogproblem(error=_('data length off by %d bytes') % dd)
2542 yield revlogproblem(error=_('data length off by %d bytes') % dd)
2541 if di:
2543 if di:
2542 yield revlogproblem(error=_('index contains %d extra bytes') % di)
2544 yield revlogproblem(error=_('index contains %d extra bytes') % di)
2543
2545
2544 version = self.version & 0xFFFF
2546 version = self.version & 0xFFFF
2545
2547
2546 # The verifier tells us what version revlog we should be.
2548 # The verifier tells us what version revlog we should be.
2547 if version != state['expectedversion']:
2549 if version != state['expectedversion']:
2548 yield revlogproblem(
2550 yield revlogproblem(
2549 warning=_("warning: '%s' uses revlog format %d; expected %d") %
2551 warning=_("warning: '%s' uses revlog format %d; expected %d") %
2550 (self.indexfile, version, state['expectedversion']))
2552 (self.indexfile, version, state['expectedversion']))
2551
2553
2552 state['skipread'] = set()
2554 state['skipread'] = set()
2553
2555
2554 for rev in self:
2556 for rev in self:
2555 node = self.node(rev)
2557 node = self.node(rev)
2556
2558
2557 # Verify contents. 4 cases to care about:
2559 # Verify contents. 4 cases to care about:
2558 #
2560 #
2559 # common: the most common case
2561 # common: the most common case
2560 # rename: with a rename
2562 # rename: with a rename
2561 # meta: file content starts with b'\1\n', the metadata
2563 # meta: file content starts with b'\1\n', the metadata
2562 # header defined in filelog.py, but without a rename
2564 # header defined in filelog.py, but without a rename
2563 # ext: content stored externally
2565 # ext: content stored externally
2564 #
2566 #
2565 # More formally, their differences are shown below:
2567 # More formally, their differences are shown below:
2566 #
2568 #
2567 # | common | rename | meta | ext
2569 # | common | rename | meta | ext
2568 # -------------------------------------------------------
2570 # -------------------------------------------------------
2569 # flags() | 0 | 0 | 0 | not 0
2571 # flags() | 0 | 0 | 0 | not 0
2570 # renamed() | False | True | False | ?
2572 # renamed() | False | True | False | ?
2571 # rawtext[0:2]=='\1\n'| False | True | True | ?
2573 # rawtext[0:2]=='\1\n'| False | True | True | ?
2572 #
2574 #
2573 # "rawtext" means the raw text stored in revlog data, which
2575 # "rawtext" means the raw text stored in revlog data, which
2574 # could be retrieved by "rawdata(rev)". "text"
2576 # could be retrieved by "rawdata(rev)". "text"
2575 # mentioned below is "revision(rev)".
2577 # mentioned below is "revision(rev)".
2576 #
2578 #
2577 # There are 3 different lengths stored physically:
2579 # There are 3 different lengths stored physically:
2578 # 1. L1: rawsize, stored in revlog index
2580 # 1. L1: rawsize, stored in revlog index
2579 # 2. L2: len(rawtext), stored in revlog data
2581 # 2. L2: len(rawtext), stored in revlog data
2580 # 3. L3: len(text), stored in revlog data if flags==0, or
2582 # 3. L3: len(text), stored in revlog data if flags==0, or
2581 # possibly somewhere else if flags!=0
2583 # possibly somewhere else if flags!=0
2582 #
2584 #
2583 # L1 should be equal to L2. L3 could be different from them.
2585 # L1 should be equal to L2. L3 could be different from them.
2584 # "text" may or may not affect commit hash depending on flag
2586 # "text" may or may not affect commit hash depending on flag
2585 # processors (see flagutil.addflagprocessor).
2587 # processors (see flagutil.addflagprocessor).
2586 #
2588 #
2587 # | common | rename | meta | ext
2589 # | common | rename | meta | ext
2588 # -------------------------------------------------
2590 # -------------------------------------------------
2589 # rawsize() | L1 | L1 | L1 | L1
2591 # rawsize() | L1 | L1 | L1 | L1
2590 # size() | L1 | L2-LM | L1(*) | L1 (?)
2592 # size() | L1 | L2-LM | L1(*) | L1 (?)
2591 # len(rawtext) | L2 | L2 | L2 | L2
2593 # len(rawtext) | L2 | L2 | L2 | L2
2592 # len(text) | L2 | L2 | L2 | L3
2594 # len(text) | L2 | L2 | L2 | L3
2593 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
2595 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
2594 #
2596 #
2595 # LM: length of metadata, depending on rawtext
2597 # LM: length of metadata, depending on rawtext
2596 # (*): not ideal, see comment in filelog.size
2598 # (*): not ideal, see comment in filelog.size
2597 # (?): could be "- len(meta)" if the resolved content has
2599 # (?): could be "- len(meta)" if the resolved content has
2598 # rename metadata
2600 # rename metadata
2599 #
2601 #
2600 # Checks needed to be done:
2602 # Checks needed to be done:
2601 # 1. length check: L1 == L2, in all cases.
2603 # 1. length check: L1 == L2, in all cases.
2602 # 2. hash check: depending on flag processor, we may need to
2604 # 2. hash check: depending on flag processor, we may need to
2603 # use either "text" (external), or "rawtext" (in revlog).
2605 # use either "text" (external), or "rawtext" (in revlog).
2604
2606
2605 try:
2607 try:
2606 skipflags = state.get('skipflags', 0)
2608 skipflags = state.get('skipflags', 0)
2607 if skipflags:
2609 if skipflags:
2608 skipflags &= self.flags(rev)
2610 skipflags &= self.flags(rev)
2609
2611
2610 if skipflags:
2612 if skipflags:
2611 state['skipread'].add(node)
2613 state['skipread'].add(node)
2612 else:
2614 else:
2613 # Side-effect: read content and verify hash.
2615 # Side-effect: read content and verify hash.
2614 self.revision(node)
2616 self.revision(node)
2615
2617
2616 l1 = self.rawsize(rev)
2618 l1 = self.rawsize(rev)
2617 l2 = len(self.rawdata(node))
2619 l2 = len(self.rawdata(node))
2618
2620
2619 if l1 != l2:
2621 if l1 != l2:
2620 yield revlogproblem(
2622 yield revlogproblem(
2621 error=_('unpacked size is %d, %d expected') % (l2, l1),
2623 error=_('unpacked size is %d, %d expected') % (l2, l1),
2622 node=node)
2624 node=node)
2623
2625
2624 except error.CensoredNodeError:
2626 except error.CensoredNodeError:
2625 if state['erroroncensored']:
2627 if state['erroroncensored']:
2626 yield revlogproblem(error=_('censored file data'),
2628 yield revlogproblem(error=_('censored file data'),
2627 node=node)
2629 node=node)
2628 state['skipread'].add(node)
2630 state['skipread'].add(node)
2629 except Exception as e:
2631 except Exception as e:
2630 yield revlogproblem(
2632 yield revlogproblem(
2631 error=_('unpacking %s: %s') % (short(node),
2633 error=_('unpacking %s: %s') % (short(node),
2632 stringutil.forcebytestr(e)),
2634 stringutil.forcebytestr(e)),
2633 node=node)
2635 node=node)
2634 state['skipread'].add(node)
2636 state['skipread'].add(node)
2635
2637
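# Sketch of driving verifyintegrity(); the `state` keys follow the lookups
# above and `rl` is assumed to be a censorable revlog such as a filelog.
state = {'expectedversion': 1, 'erroroncensored': True}
for problem in rl.verifyintegrity(state):
    # each revlogproblem carries an error or a warning, and possibly a node
    pass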
2636 def storageinfo(self, exclusivefiles=False, sharedfiles=False,
2638 def storageinfo(self, exclusivefiles=False, sharedfiles=False,
2637 revisionscount=False, trackedsize=False,
2639 revisionscount=False, trackedsize=False,
2638 storedsize=False):
2640 storedsize=False):
2639 d = {}
2641 d = {}
2640
2642
2641 if exclusivefiles:
2643 if exclusivefiles:
2642 d['exclusivefiles'] = [(self.opener, self.indexfile)]
2644 d['exclusivefiles'] = [(self.opener, self.indexfile)]
2643 if not self._inline:
2645 if not self._inline:
2644 d['exclusivefiles'].append((self.opener, self.datafile))
2646 d['exclusivefiles'].append((self.opener, self.datafile))
2645
2647
2646 if sharedfiles:
2648 if sharedfiles:
2647 d['sharedfiles'] = []
2649 d['sharedfiles'] = []
2648
2650
2649 if revisionscount:
2651 if revisionscount:
2650 d['revisionscount'] = len(self)
2652 d['revisionscount'] = len(self)
2651
2653
2652 if trackedsize:
2654 if trackedsize:
2653 d['trackedsize'] = sum(map(self.rawsize, iter(self)))
2655 d['trackedsize'] = sum(map(self.rawsize, iter(self)))
2654
2656
2655 if storedsize:
2657 if storedsize:
2656 d['storedsize'] = sum(self.opener.stat(path).st_size
2658 d['storedsize'] = sum(self.opener.stat(path).st_size
2657 for path in self.files())
2659 for path in self.files())
2658
2660
2659 return d
2661 return d
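# Finally, a hypothetical query for the accounting data exposed above.
info = rl.storageinfo(revisionscount=True, trackedsize=True, storedsize=True)
# info now maps 'revisionscount', 'trackedsize' and 'storedsize' to numbers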