##// END OF EJS Templates
flagutil: move addflagprocessor to the new module (API)
marmoute -
r42958:6d61be15 default
parent child Browse files
Show More
@@ -1,2686 +1,2659 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 """Storage back-end for Mercurial.
8 """Storage back-end for Mercurial.
9
9
10 This provides efficient delta storage with O(1) retrieve and append
10 This provides efficient delta storage with O(1) retrieve and append
11 and O(changes) merge between branches.
11 and O(changes) merge between branches.
12 """
12 """
13
13
14 from __future__ import absolute_import
14 from __future__ import absolute_import
15
15
16 import collections
16 import collections
17 import contextlib
17 import contextlib
18 import errno
18 import errno
19 import io
19 import io
20 import os
20 import os
21 import struct
21 import struct
22 import zlib
22 import zlib
23
23
24 # import stuff from node for others to import from revlog
24 # import stuff from node for others to import from revlog
25 from .node import (
25 from .node import (
26 bin,
26 bin,
27 hex,
27 hex,
28 nullhex,
28 nullhex,
29 nullid,
29 nullid,
30 nullrev,
30 nullrev,
31 short,
31 short,
32 wdirfilenodeids,
32 wdirfilenodeids,
33 wdirhex,
33 wdirhex,
34 wdirid,
34 wdirid,
35 wdirrev,
35 wdirrev,
36 )
36 )
37 from .i18n import _
37 from .i18n import _
38 from .revlogutils.constants import (
38 from .revlogutils.constants import (
39 FLAG_GENERALDELTA,
39 FLAG_GENERALDELTA,
40 FLAG_INLINE_DATA,
40 FLAG_INLINE_DATA,
41 REVLOGV0,
41 REVLOGV0,
42 REVLOGV1,
42 REVLOGV1,
43 REVLOGV1_FLAGS,
43 REVLOGV1_FLAGS,
44 REVLOGV2,
44 REVLOGV2,
45 REVLOGV2_FLAGS,
45 REVLOGV2_FLAGS,
46 REVLOG_DEFAULT_FLAGS,
46 REVLOG_DEFAULT_FLAGS,
47 REVLOG_DEFAULT_FORMAT,
47 REVLOG_DEFAULT_FORMAT,
48 REVLOG_DEFAULT_VERSION,
48 REVLOG_DEFAULT_VERSION,
49 )
49 )
50 from .revlogutils.flagutil import (
50 from .revlogutils.flagutil import (
51 REVIDX_DEFAULT_FLAGS,
51 REVIDX_DEFAULT_FLAGS,
52 REVIDX_ELLIPSIS,
52 REVIDX_ELLIPSIS,
53 REVIDX_EXTSTORED,
53 REVIDX_EXTSTORED,
54 REVIDX_FLAGS_ORDER,
54 REVIDX_FLAGS_ORDER,
55 REVIDX_ISCENSORED,
55 REVIDX_ISCENSORED,
56 REVIDX_RAWTEXT_CHANGING_FLAGS,
56 REVIDX_RAWTEXT_CHANGING_FLAGS,
57 )
57 )
58 from .thirdparty import (
58 from .thirdparty import (
59 attr,
59 attr,
60 )
60 )
61 from . import (
61 from . import (
62 ancestor,
62 ancestor,
63 dagop,
63 dagop,
64 error,
64 error,
65 mdiff,
65 mdiff,
66 policy,
66 policy,
67 pycompat,
67 pycompat,
68 repository,
68 repository,
69 templatefilters,
69 templatefilters,
70 util,
70 util,
71 )
71 )
72 from .revlogutils import (
72 from .revlogutils import (
73 deltas as deltautil,
73 deltas as deltautil,
74 flagutil,
74 flagutil,
75 )
75 )
76 from .utils import (
76 from .utils import (
77 interfaceutil,
77 interfaceutil,
78 storageutil,
78 storageutil,
79 stringutil,
79 stringutil,
80 )
80 )
81
81
# Reference the imported names once so pyflakes does not flag them as
# "imported but unused".  They must stay importable from this module
# because third-party extensions look them up here.
REVLOGV0
REVLOGV1
REVLOGV2
FLAG_INLINE_DATA
FLAG_GENERALDELTA
REVLOG_DEFAULT_FLAGS
REVLOG_DEFAULT_FORMAT
REVLOG_DEFAULT_VERSION
REVLOGV1_FLAGS
REVLOGV2_FLAGS
REVIDX_ISCENSORED
REVIDX_ELLIPSIS
REVIDX_EXTSTORED
REVIDX_DEFAULT_FLAGS
REVIDX_FLAGS_ORDER
REVIDX_RAWTEXT_CHANGING_FLAGS
100
100
# Accelerated implementations selected by the policy module.
# NOTE(review): importmod/importrust presumably pick C/Rust variants with a
# pure-Python fallback depending on policy settings — confirm in policy.py.
parsers = policy.importmod(r'parsers')
rustancestor = policy.importrust(r'ancestor')
rustdagop = policy.importrust(r'dagop')

# Aliased for performance.
_zlibdecompress = zlib.decompress

# max size of revlog with inline data
_maxinline = 131072
# read granularity for the raw data file (1 MiB)
_chunksize = 1048576
111
111
112 # Flag processors for REVIDX_ELLIPSIS.
112 # Flag processors for REVIDX_ELLIPSIS.
def ellipsisreadprocessor(rl, text):
    """Read-side flag processor for REVIDX_ELLIPSIS revisions.

    A no-op transform: the stored text is returned unchanged.  The False
    second element signals that the returned text cannot be used for hash
    integrity checking (see the flag processor contract).
    """
    return (text, False)
115
115
def ellipsiswriteprocessor(rl, text):
    """Write-side flag processor for REVIDX_ELLIPSIS revisions.

    A no-op transform: the text is stored as-is.  The False second element
    signals the result is not usable for hash integrity checking.
    """
    return (text, False)
118
118
def ellipsisrawprocessor(rl, text):
    """Raw flag processor for REVIDX_ELLIPSIS revisions.

    Always False: the rawtext of an ellipsis revision can never be used
    for hash integrity checks.
    """
    return False
121
121
# The (read, write, raw) processor triple registered for REVIDX_ELLIPSIS
# (installed per-instance when the 'enableellipsis' opener option is set).
ellipsisprocessor = (
    ellipsisreadprocessor,
    ellipsiswriteprocessor,
    ellipsisrawprocessor,
)
127
127
def addflagprocessor(flag, processor):
    """Register a flag processor on a revision data flag.

    Invariant:
    - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER,
      and REVIDX_RAWTEXT_CHANGING_FLAGS if they can alter rawtext.
    - Only one flag processor can be registered on a specific flag.
    - flagprocessors must be 3-tuples of functions (read, write, raw) with the
      following signatures:
          - (read)  f(self, rawtext) -> text, bool
          - (write) f(self, text) -> rawtext, bool
          - (raw)   f(self, rawtext) -> bool
      "text" is presented to the user. "rawtext" is stored in revlog data, not
      directly visible to the user.
      The boolean returned by these transforms is used to determine whether
      the returned text can be used for hash integrity checking. For example,
      if "write" returns False, then "text" is used to generate hash. If
      "write" returns True, that basically means "rawtext" returned by "write"
      should be used to generate hash. Usually, "write" and "read" return
      different booleans. And "raw" returns a same boolean as "write".

    Note: The 'raw' transform is used for changegroup generation and in some
    debug commands. In this case the transform only indicates whether the
    contents can be used for hash integrity checks.

    Note: this is now a thin compatibility wrapper; the implementation lives
    in revlogutils.flagutil and registration goes through
    flagutil.insertflagprocessor on the shared flagutil.flagprocessors table.
    """
    flagutil.insertflagprocessor(flag, processor, flagutil.flagprocessors)
154
def getoffset(q):
    """Extract the data-file offset from a packed offset/flags value.

    The low 16 bits of *q* hold the flags; everything above them is the
    offset (see offset_type for the packing).
    """
    offset = q >> 16
    return int(offset)
157
130
def gettype(q):
    """Extract the flag bits (low 16 bits) from a packed offset/flags value."""
    flags = q & 0xFFFF
    return int(flags)
160
133
def offset_type(offset, type):
    """Pack *offset* and flag bits *type* into a single integer.

    Layout is ``offset << 16 | type`` (the inverse of getoffset/gettype).
    Raises ValueError if *type* carries bits outside
    flagutil.REVIDX_KNOWN_FLAGS.
    """
    if type & ~flagutil.REVIDX_KNOWN_FLAGS:
        raise ValueError('unknown revlog index flags')
    return int(int(offset) << 16 | type)
165
138
@attr.s(slots=True, frozen=True)
class _revisioninfo(object):
    """Information about a revision that allows building its fulltext.

    node: expected hash of the revision
    p1, p2: parent revs of the revision
    btext: built text cache consisting of a one-element list
    cachedelta: (baserev, uncompressed_delta) or None
    flags: flags associated to the revision storage

    One of btext[0] or cachedelta must be set.
    """
    # NOTE: attribute order defines the generated __init__ order; keep it.
    node = attr.ib()
    p1 = attr.ib()
    p2 = attr.ib()
    btext = attr.ib()
    textlen = attr.ib()  # NOTE(review): presumably the fulltext length — confirm
    cachedelta = attr.ib()
    flags = attr.ib()
184
157
@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class revlogrevisiondelta(object):
    """Concrete carrier for the repository.irevisiondelta interface.

    Plain attr-generated value object; attribute order defines the
    generated __init__ order, so keep it.
    """
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    linknode = attr.ib(default=None)
197
170
@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class revlogproblem(object):
    """A problem found during revlog verification (repository.iverifyproblem).

    NOTE(review): presumably only one of warning/error is set per instance —
    confirm against the verify code that constructs these.
    """
    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)
204
177
# On-disk layout of a version-0 index record (76 bytes total):
#    4 bytes: offset
#    4 bytes: compressed length
#    4 bytes: base rev
#    4 bytes: link rev
#   20 bytes: parent 1 nodeid
#   20 bytes: parent 2 nodeid
#   20 bytes: nodeid
indexformatv0 = struct.Struct(">4l20s20s20s")
# Bound the Struct methods once; these are hot-path helpers.
indexformatv0_pack = indexformatv0.pack
indexformatv0_unpack = indexformatv0.unpack
216
189
class revlogoldindex(list):
    """Index list for v0 revlogs where index -1 is the null revision."""

    def __getitem__(self, i):
        # Unlike a plain list, -1 is the nullrev sentinel rather than
        # "last element": synthesize the all-null index entry for it.
        if i != -1:
            return list.__getitem__(self, i)
        return (0, 0, 0, -1, -1, -1, -1, nullid)
222
195
class revlogoldio(object):
    """Reader/writer for the version-0 revlog index format."""

    def __init__(self):
        # Fixed record size of a v0 index entry.
        self.size = indexformatv0.size

    def parseindex(self, data, inline):
        """Parse raw v0 index bytes into (index, nodemap, chunkcache).

        Entries are converted to the revlogv1 in-memory tuple shape; the
        chunk cache slot is always None for v0 (no inline data).
        """
        entrysize = self.size
        index = []
        nodemap = {nullid: nullrev}
        n = 0
        # Walk complete records only; a trailing partial record is ignored.
        for off in range(0, len(data) - entrysize + 1, entrysize):
            e = indexformatv0_unpack(data[off:off + entrysize])
            # transform to revlogv1 format; parent nodeids resolve to revs
            # through the nodemap built so far (parents precede children).
            e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
                  nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev),
                  e[6])
            index.append(e2)
            nodemap[e[6]] = n
            n += 1

        return revlogoldindex(index), nodemap, None

    def packentry(self, entry, node, version, rev):
        """Serialize a v1-shaped *entry* back into the v0 on-disk format.

        *node* maps a rev back to its nodeid.  Raises RevlogError if the
        entry carries flags, which v0 cannot represent.
        """
        if gettype(entry[0]):
            raise error.RevlogError(_('index entry flags need revlog '
                                      'version 1'))
        e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
              node(entry[5]), node(entry[6]), entry[7])
        return indexformatv0_pack(*e2)
253
226
# On-disk layout of a "ng" (v1/v2) index record (64 bytes total):
#    6 bytes: offset
#    2 bytes: flags
#    4 bytes: compressed length
#    4 bytes: uncompressed length
#    4 bytes: base rev
#    4 bytes: link rev
#    4 bytes: parent 1 rev
#    4 bytes: parent 2 rev
#   32 bytes: nodeid (20 significant + 12 bytes padding)
indexformatng = struct.Struct(">Qiiiiii20s12x")
indexformatng_pack = indexformatng.pack
# The 4-byte header word holding version + feature flags.
versionformat = struct.Struct(">I")
versionformat_pack = versionformat.pack
versionformat_unpack = versionformat.unpack

# corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
# signed integer)
_maxentrysize = 0x7fffffff
273
246
class revlogio(object):
    """Reader/writer for the revlogv1/v2 ("ng") index format."""

    def __init__(self):
        # Fixed record size of an ng index entry.
        self.size = indexformatng.size

    def parseindex(self, data, inline):
        """Parse index bytes and return (index, nodemap, chunkcache)."""
        # call the C implementation to parse the index data
        index, cache = parsers.parse_index2(data, inline)
        return index, getattr(index, 'nodemap', None), cache

    def packentry(self, entry, node, version, rev):
        """Serialize *entry*; for rev 0 the leading 4 bytes double as the
        revlog version header."""
        packed = indexformatng_pack(*entry)
        if rev == 0:
            packed = versionformat_pack(version) + packed[4:]
        return packed
288
261
class revlog(object):
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.

    If checkambig, indexfile is opened with checkambig=True at
    writing, to avoid file stat ambiguity.

    If mmaplargeindex is True, and an mmapindexthreshold is set, the
    index will be mmapped rather than read if it is larger than the
    configured threshold.

    If censorable is True, the revlog can have censored revisions.

    If `upperboundcomp` is not None, this is the expected maximal gain from
    compression for the data content.
    """
    def __init__(self, opener, indexfile, datafile=None, checkambig=False,
                 mmaplargeindex=False, censorable=False,
                 upperboundcomp=None):
        """
        create a revlog object

        opener is a function that abstracts the file opening operation
        and can be used to implement COW semantics or the like.

        If datafile is omitted it is derived from indexfile (last two
        characters replaced by ".d").
        """
        self.upperboundcomp = upperboundcomp
        self.indexfile = indexfile
        self.datafile = datafile or (indexfile[:-2] + ".d")
        self.opener = opener
        # When True, indexfile is opened with checkambig=True at writing, to
        # avoid file stat ambiguity.
        self._checkambig = checkambig
        self._mmaplargeindex = mmaplargeindex
        self._censorable = censorable
        # 3-tuple of (node, rev, text) for a raw revision.
        self._revisioncache = None
        # Maps rev to chain base rev.
        self._chainbasecache = util.lrucachedict(100)
        # 2-tuple of (offset, data) of raw data from the revlog at an offset.
        self._chunkcache = (0, '')
        # How much data to read and cache into the raw revlog data cache.
        # The defaults below may be overridden from opener options by
        # _loadindex().
        self._chunkcachesize = 65536
        self._maxchainlen = None
        self._deltabothparents = True
        self.index = []
        # Mapping of partial identifiers to full nodes.
        self._pcache = {}
        # Mapping of revision integer to full node.
        self._nodecache = {nullid: nullrev}
        self._nodepos = None
        self._compengine = 'zlib'
        self._compengineopts = {}
        self._maxdeltachainspan = -1
        self._withsparseread = False
        self._sparserevlog = False
        self._srdensitythreshold = 0.50
        self._srmingapsize = 262144

        # Make copy of flag processors so each revlog instance can support
        # custom flags.
        self._flagprocessors = dict(flagutil.flagprocessors)

        # 2-tuple of file handles being used for active writing.
        self._writinghandles = None

        # Reads the index from disk and finishes configuration.
        self._loadindex()
377
350
    def _loadindex(self):
        """Read the index file (if any) and finish configuring this revlog.

        Applies the opener's options, determines the on-disk format version,
        parses the index and sets self.version, self._inline,
        self._generaldelta, self.index, self.nodemap and related caches.
        Raises RevlogError on unknown versions/flags or a corrupted index.
        """
        mmapindexthreshold = None
        opts = getattr(self.opener, 'options', {}) or {}

        # Pick the version flags to use for a *new* (empty) revlog, based on
        # which requirements the repository advertised through the opener.
        if 'revlogv2' in opts:
            newversionflags = REVLOGV2 | FLAG_INLINE_DATA
        elif 'revlogv1' in opts:
            newversionflags = REVLOGV1 | FLAG_INLINE_DATA
            if 'generaldelta' in opts:
                newversionflags |= FLAG_GENERALDELTA
        elif getattr(self.opener, 'options', None) is not None:
            # If options provided but no 'revlog*' found, the repository
            # would have no 'requires' file in it, which means we have to
            # stick to the old format.
            newversionflags = REVLOGV0
        else:
            newversionflags = REVLOG_DEFAULT_VERSION

        # Override the constructor's defaults from the opener options.
        if 'chunkcachesize' in opts:
            self._chunkcachesize = opts['chunkcachesize']
        if 'maxchainlen' in opts:
            self._maxchainlen = opts['maxchainlen']
        if 'deltabothparents' in opts:
            self._deltabothparents = opts['deltabothparents']
        self._lazydelta = bool(opts.get('lazydelta', True))
        self._lazydeltabase = False
        if self._lazydelta:
            self._lazydeltabase = bool(opts.get('lazydeltabase', False))
        if 'compengine' in opts:
            self._compengine = opts['compengine']
        if 'zlib.level' in opts:
            self._compengineopts['zlib.level'] = opts['zlib.level']
        if 'zstd.level' in opts:
            self._compengineopts['zstd.level'] = opts['zstd.level']
        if 'maxdeltachainspan' in opts:
            self._maxdeltachainspan = opts['maxdeltachainspan']
        if self._mmaplargeindex and 'mmapindexthreshold' in opts:
            mmapindexthreshold = opts['mmapindexthreshold']
        self._sparserevlog = bool(opts.get('sparse-revlog', False))
        withsparseread = bool(opts.get('with-sparse-read', False))
        # sparse-revlog forces sparse-read
        self._withsparseread = self._sparserevlog or withsparseread
        if 'sparse-read-density-threshold' in opts:
            self._srdensitythreshold = opts['sparse-read-density-threshold']
        if 'sparse-read-min-gap-size' in opts:
            self._srmingapsize = opts['sparse-read-min-gap-size']
        if opts.get('enableellipsis'):
            self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor

        # revlog v0 doesn't have flag processors
        for flag, processor in opts.get(b'flagprocessors', {}).iteritems():
            flagutil.insertflagprocessor(flag, processor, self._flagprocessors)

        # The chunk cache size must be a positive power of two.
        if self._chunkcachesize <= 0:
            raise error.RevlogError(_('revlog chunk cache size %r is not '
                                      'greater than 0') % self._chunkcachesize)
        elif self._chunkcachesize & (self._chunkcachesize - 1):
            raise error.RevlogError(_('revlog chunk cache size %r is not a '
                                      'power of 2') % self._chunkcachesize)

        # Read the raw index, possibly via mmap for large files; a missing
        # index file simply means a new, empty revlog.
        indexdata = ''
        self._initempty = True
        try:
            with self._indexfp() as f:
                if (mmapindexthreshold is not None and
                    self.opener.fstat(f).st_size >= mmapindexthreshold):
                    # TODO: should .close() to release resources without
                    # relying on Python GC
                    indexdata = util.buffer(util.mmapread(f))
                else:
                    indexdata = f.read()
            if len(indexdata) > 0:
                # Existing revlog: the version comes from the on-disk header.
                versionflags = versionformat_unpack(indexdata[:4])[0]
                self._initempty = False
            else:
                versionflags = newversionflags
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

            versionflags = newversionflags

        self.version = versionflags

        # Split the header word into feature flags (high bits) and format
        # version (low 16 bits).
        flags = versionflags & ~0xFFFF
        fmt = versionflags & 0xFFFF

        if fmt == REVLOGV0:
            # v0 supports no feature flags at all.
            if flags:
                raise error.RevlogError(_('unknown flags (%#04x) in version %d '
                                          'revlog %s') %
                                        (flags >> 16, fmt, self.indexfile))

            self._inline = False
            self._generaldelta = False

        elif fmt == REVLOGV1:
            if flags & ~REVLOGV1_FLAGS:
                raise error.RevlogError(_('unknown flags (%#04x) in version %d '
                                          'revlog %s') %
                                        (flags >> 16, fmt, self.indexfile))

            self._inline = versionflags & FLAG_INLINE_DATA
            self._generaldelta = versionflags & FLAG_GENERALDELTA

        elif fmt == REVLOGV2:
            if flags & ~REVLOGV2_FLAGS:
                raise error.RevlogError(_('unknown flags (%#04x) in version %d '
                                          'revlog %s') %
                                        (flags >> 16, fmt, self.indexfile))

            self._inline = versionflags & FLAG_INLINE_DATA
            # generaldelta implied by version 2 revlogs.
            self._generaldelta = True

        else:
            raise error.RevlogError(_('unknown version (%d) in revlog %s') %
                                    (fmt, self.indexfile))
        # sparse-revlog can't be on without general-delta (issue6056)
        if not self._generaldelta:
            self._sparserevlog = False

        self._storedeltachains = True

        # Select the index parser matching the on-disk format.
        self._io = revlogio()
        if self.version == REVLOGV0:
            self._io = revlogoldio()
        try:
            d = self._io.parseindex(indexdata, self._inline)
        except (ValueError, IndexError):
            raise error.RevlogError(_("index %s is corrupted") %
                                    self.indexfile)
        self.index, nodemap, self._chunkcache = d
        if nodemap is not None:
            self.nodemap = self._nodecache = nodemap
        if not self._chunkcache:
            self._chunkclear()
        # revnum -> (chain-length, sum-delta-length)
        self._chaininfocache = {}
        # revlog header -> revlog compressor
        self._decompressors = {}
519
492
520 @util.propertycache
493 @util.propertycache
521 def _compressor(self):
494 def _compressor(self):
522 engine = util.compengines[self._compengine]
495 engine = util.compengines[self._compengine]
523 return engine.revlogcompressor(self._compengineopts)
496 return engine.revlogcompressor(self._compengineopts)
524
497
525 def _indexfp(self, mode='r'):
498 def _indexfp(self, mode='r'):
526 """file object for the revlog's index file"""
499 """file object for the revlog's index file"""
527 args = {r'mode': mode}
500 args = {r'mode': mode}
528 if mode != 'r':
501 if mode != 'r':
529 args[r'checkambig'] = self._checkambig
502 args[r'checkambig'] = self._checkambig
530 if mode == 'w':
503 if mode == 'w':
531 args[r'atomictemp'] = True
504 args[r'atomictemp'] = True
532 return self.opener(self.indexfile, **args)
505 return self.opener(self.indexfile, **args)
533
506
534 def _datafp(self, mode='r'):
507 def _datafp(self, mode='r'):
535 """file object for the revlog's data file"""
508 """file object for the revlog's data file"""
536 return self.opener(self.datafile, mode=mode)
509 return self.opener(self.datafile, mode=mode)
537
510
538 @contextlib.contextmanager
511 @contextlib.contextmanager
539 def _datareadfp(self, existingfp=None):
512 def _datareadfp(self, existingfp=None):
540 """file object suitable to read data"""
513 """file object suitable to read data"""
541 # Use explicit file handle, if given.
514 # Use explicit file handle, if given.
542 if existingfp is not None:
515 if existingfp is not None:
543 yield existingfp
516 yield existingfp
544
517
545 # Use a file handle being actively used for writes, if available.
518 # Use a file handle being actively used for writes, if available.
546 # There is some danger to doing this because reads will seek the
519 # There is some danger to doing this because reads will seek the
547 # file. However, _writeentry() performs a SEEK_END before all writes,
520 # file. However, _writeentry() performs a SEEK_END before all writes,
548 # so we should be safe.
521 # so we should be safe.
549 elif self._writinghandles:
522 elif self._writinghandles:
550 if self._inline:
523 if self._inline:
551 yield self._writinghandles[0]
524 yield self._writinghandles[0]
552 else:
525 else:
553 yield self._writinghandles[1]
526 yield self._writinghandles[1]
554
527
555 # Otherwise open a new file handle.
528 # Otherwise open a new file handle.
556 else:
529 else:
557 if self._inline:
530 if self._inline:
558 func = self._indexfp
531 func = self._indexfp
559 else:
532 else:
560 func = self._datafp
533 func = self._datafp
561 with func() as fp:
534 with func() as fp:
562 yield fp
535 yield fp
563
536
564 def tip(self):
537 def tip(self):
565 return self.node(len(self.index) - 1)
538 return self.node(len(self.index) - 1)
566 def __contains__(self, rev):
539 def __contains__(self, rev):
567 return 0 <= rev < len(self)
540 return 0 <= rev < len(self)
568 def __len__(self):
541 def __len__(self):
569 return len(self.index)
542 return len(self.index)
570 def __iter__(self):
543 def __iter__(self):
571 return iter(pycompat.xrange(len(self)))
544 return iter(pycompat.xrange(len(self)))
572 def revs(self, start=0, stop=None):
545 def revs(self, start=0, stop=None):
573 """iterate over all rev in this revlog (from start to stop)"""
546 """iterate over all rev in this revlog (from start to stop)"""
574 return storageutil.iterrevs(len(self), start=start, stop=stop)
547 return storageutil.iterrevs(len(self), start=start, stop=stop)
575
548
576 @util.propertycache
549 @util.propertycache
577 def nodemap(self):
550 def nodemap(self):
578 if self.index:
551 if self.index:
579 # populate mapping down to the initial node
552 # populate mapping down to the initial node
580 node0 = self.index[0][7] # get around changelog filtering
553 node0 = self.index[0][7] # get around changelog filtering
581 self.rev(node0)
554 self.rev(node0)
582 return self._nodecache
555 return self._nodecache
583
556
584 def hasnode(self, node):
557 def hasnode(self, node):
585 try:
558 try:
586 self.rev(node)
559 self.rev(node)
587 return True
560 return True
588 except KeyError:
561 except KeyError:
589 return False
562 return False
590
563
591 def candelta(self, baserev, rev):
564 def candelta(self, baserev, rev):
592 """whether two revisions (baserev, rev) can be delta-ed or not"""
565 """whether two revisions (baserev, rev) can be delta-ed or not"""
593 # Disable delta if either rev requires a content-changing flag
566 # Disable delta if either rev requires a content-changing flag
594 # processor (ex. LFS). This is because such flag processor can alter
567 # processor (ex. LFS). This is because such flag processor can alter
595 # the rawtext content that the delta will be based on, and two clients
568 # the rawtext content that the delta will be based on, and two clients
596 # could have a same revlog node with different flags (i.e. different
569 # could have a same revlog node with different flags (i.e. different
597 # rawtext contents) and the delta could be incompatible.
570 # rawtext contents) and the delta could be incompatible.
598 if ((self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS)
571 if ((self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS)
599 or (self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS)):
572 or (self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS)):
600 return False
573 return False
601 return True
574 return True
602
575
603 def clearcaches(self):
576 def clearcaches(self):
604 self._revisioncache = None
577 self._revisioncache = None
605 self._chainbasecache.clear()
578 self._chainbasecache.clear()
606 self._chunkcache = (0, '')
579 self._chunkcache = (0, '')
607 self._pcache = {}
580 self._pcache = {}
608
581
609 try:
582 try:
610 # If we are using the native C version, you are in a fun case
583 # If we are using the native C version, you are in a fun case
611 # where self.index, self.nodemap and self._nodecaches is the same
584 # where self.index, self.nodemap and self._nodecaches is the same
612 # object.
585 # object.
613 self._nodecache.clearcaches()
586 self._nodecache.clearcaches()
614 except AttributeError:
587 except AttributeError:
615 self._nodecache = {nullid: nullrev}
588 self._nodecache = {nullid: nullrev}
616 self._nodepos = None
589 self._nodepos = None
617
590
618 def rev(self, node):
591 def rev(self, node):
619 try:
592 try:
620 return self._nodecache[node]
593 return self._nodecache[node]
621 except TypeError:
594 except TypeError:
622 raise
595 raise
623 except error.RevlogError:
596 except error.RevlogError:
624 # parsers.c radix tree lookup failed
597 # parsers.c radix tree lookup failed
625 if node == wdirid or node in wdirfilenodeids:
598 if node == wdirid or node in wdirfilenodeids:
626 raise error.WdirUnsupported
599 raise error.WdirUnsupported
627 raise error.LookupError(node, self.indexfile, _('no node'))
600 raise error.LookupError(node, self.indexfile, _('no node'))
628 except KeyError:
601 except KeyError:
629 # pure python cache lookup failed
602 # pure python cache lookup failed
630 n = self._nodecache
603 n = self._nodecache
631 i = self.index
604 i = self.index
632 p = self._nodepos
605 p = self._nodepos
633 if p is None:
606 if p is None:
634 p = len(i) - 1
607 p = len(i) - 1
635 else:
608 else:
636 assert p < len(i)
609 assert p < len(i)
637 for r in pycompat.xrange(p, -1, -1):
610 for r in pycompat.xrange(p, -1, -1):
638 v = i[r][7]
611 v = i[r][7]
639 n[v] = r
612 n[v] = r
640 if v == node:
613 if v == node:
641 self._nodepos = r - 1
614 self._nodepos = r - 1
642 return r
615 return r
643 if node == wdirid or node in wdirfilenodeids:
616 if node == wdirid or node in wdirfilenodeids:
644 raise error.WdirUnsupported
617 raise error.WdirUnsupported
645 raise error.LookupError(node, self.indexfile, _('no node'))
618 raise error.LookupError(node, self.indexfile, _('no node'))
646
619
647 # Accessors for index entries.
620 # Accessors for index entries.
648
621
649 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
622 # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
650 # are flags.
623 # are flags.
651 def start(self, rev):
624 def start(self, rev):
652 return int(self.index[rev][0] >> 16)
625 return int(self.index[rev][0] >> 16)
653
626
654 def flags(self, rev):
627 def flags(self, rev):
655 return self.index[rev][0] & 0xFFFF
628 return self.index[rev][0] & 0xFFFF
656
629
657 def length(self, rev):
630 def length(self, rev):
658 return self.index[rev][1]
631 return self.index[rev][1]
659
632
660 def rawsize(self, rev):
633 def rawsize(self, rev):
661 """return the length of the uncompressed text for a given revision"""
634 """return the length of the uncompressed text for a given revision"""
662 l = self.index[rev][2]
635 l = self.index[rev][2]
663 if l >= 0:
636 if l >= 0:
664 return l
637 return l
665
638
666 t = self.revision(rev, raw=True)
639 t = self.revision(rev, raw=True)
667 return len(t)
640 return len(t)
668
641
669 def size(self, rev):
642 def size(self, rev):
670 """length of non-raw text (processed by a "read" flag processor)"""
643 """length of non-raw text (processed by a "read" flag processor)"""
671 # fast path: if no "read" flag processor could change the content,
644 # fast path: if no "read" flag processor could change the content,
672 # size is rawsize. note: ELLIPSIS is known to not change the content.
645 # size is rawsize. note: ELLIPSIS is known to not change the content.
673 flags = self.flags(rev)
646 flags = self.flags(rev)
674 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
647 if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
675 return self.rawsize(rev)
648 return self.rawsize(rev)
676
649
677 return len(self.revision(rev, raw=False))
650 return len(self.revision(rev, raw=False))
678
651
679 def chainbase(self, rev):
652 def chainbase(self, rev):
680 base = self._chainbasecache.get(rev)
653 base = self._chainbasecache.get(rev)
681 if base is not None:
654 if base is not None:
682 return base
655 return base
683
656
684 index = self.index
657 index = self.index
685 iterrev = rev
658 iterrev = rev
686 base = index[iterrev][3]
659 base = index[iterrev][3]
687 while base != iterrev:
660 while base != iterrev:
688 iterrev = base
661 iterrev = base
689 base = index[iterrev][3]
662 base = index[iterrev][3]
690
663
691 self._chainbasecache[rev] = base
664 self._chainbasecache[rev] = base
692 return base
665 return base
693
666
694 def linkrev(self, rev):
667 def linkrev(self, rev):
695 return self.index[rev][4]
668 return self.index[rev][4]
696
669
697 def parentrevs(self, rev):
670 def parentrevs(self, rev):
698 try:
671 try:
699 entry = self.index[rev]
672 entry = self.index[rev]
700 except IndexError:
673 except IndexError:
701 if rev == wdirrev:
674 if rev == wdirrev:
702 raise error.WdirUnsupported
675 raise error.WdirUnsupported
703 raise
676 raise
704
677
705 return entry[5], entry[6]
678 return entry[5], entry[6]
706
679
707 # fast parentrevs(rev) where rev isn't filtered
680 # fast parentrevs(rev) where rev isn't filtered
708 _uncheckedparentrevs = parentrevs
681 _uncheckedparentrevs = parentrevs
709
682
710 def node(self, rev):
683 def node(self, rev):
711 try:
684 try:
712 return self.index[rev][7]
685 return self.index[rev][7]
713 except IndexError:
686 except IndexError:
714 if rev == wdirrev:
687 if rev == wdirrev:
715 raise error.WdirUnsupported
688 raise error.WdirUnsupported
716 raise
689 raise
717
690
718 # Derived from index values.
691 # Derived from index values.
719
692
720 def end(self, rev):
693 def end(self, rev):
721 return self.start(rev) + self.length(rev)
694 return self.start(rev) + self.length(rev)
722
695
723 def parents(self, node):
696 def parents(self, node):
724 i = self.index
697 i = self.index
725 d = i[self.rev(node)]
698 d = i[self.rev(node)]
726 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
699 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
727
700
728 def chainlen(self, rev):
701 def chainlen(self, rev):
729 return self._chaininfo(rev)[0]
702 return self._chaininfo(rev)[0]
730
703
731 def _chaininfo(self, rev):
704 def _chaininfo(self, rev):
732 chaininfocache = self._chaininfocache
705 chaininfocache = self._chaininfocache
733 if rev in chaininfocache:
706 if rev in chaininfocache:
734 return chaininfocache[rev]
707 return chaininfocache[rev]
735 index = self.index
708 index = self.index
736 generaldelta = self._generaldelta
709 generaldelta = self._generaldelta
737 iterrev = rev
710 iterrev = rev
738 e = index[iterrev]
711 e = index[iterrev]
739 clen = 0
712 clen = 0
740 compresseddeltalen = 0
713 compresseddeltalen = 0
741 while iterrev != e[3]:
714 while iterrev != e[3]:
742 clen += 1
715 clen += 1
743 compresseddeltalen += e[1]
716 compresseddeltalen += e[1]
744 if generaldelta:
717 if generaldelta:
745 iterrev = e[3]
718 iterrev = e[3]
746 else:
719 else:
747 iterrev -= 1
720 iterrev -= 1
748 if iterrev in chaininfocache:
721 if iterrev in chaininfocache:
749 t = chaininfocache[iterrev]
722 t = chaininfocache[iterrev]
750 clen += t[0]
723 clen += t[0]
751 compresseddeltalen += t[1]
724 compresseddeltalen += t[1]
752 break
725 break
753 e = index[iterrev]
726 e = index[iterrev]
754 else:
727 else:
755 # Add text length of base since decompressing that also takes
728 # Add text length of base since decompressing that also takes
756 # work. For cache hits the length is already included.
729 # work. For cache hits the length is already included.
757 compresseddeltalen += e[1]
730 compresseddeltalen += e[1]
758 r = (clen, compresseddeltalen)
731 r = (clen, compresseddeltalen)
759 chaininfocache[rev] = r
732 chaininfocache[rev] = r
760 return r
733 return r
761
734
762 def _deltachain(self, rev, stoprev=None):
735 def _deltachain(self, rev, stoprev=None):
763 """Obtain the delta chain for a revision.
736 """Obtain the delta chain for a revision.
764
737
765 ``stoprev`` specifies a revision to stop at. If not specified, we
738 ``stoprev`` specifies a revision to stop at. If not specified, we
766 stop at the base of the chain.
739 stop at the base of the chain.
767
740
768 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
741 Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
769 revs in ascending order and ``stopped`` is a bool indicating whether
742 revs in ascending order and ``stopped`` is a bool indicating whether
770 ``stoprev`` was hit.
743 ``stoprev`` was hit.
771 """
744 """
772 # Try C implementation.
745 # Try C implementation.
773 try:
746 try:
774 return self.index.deltachain(rev, stoprev, self._generaldelta)
747 return self.index.deltachain(rev, stoprev, self._generaldelta)
775 except AttributeError:
748 except AttributeError:
776 pass
749 pass
777
750
778 chain = []
751 chain = []
779
752
780 # Alias to prevent attribute lookup in tight loop.
753 # Alias to prevent attribute lookup in tight loop.
781 index = self.index
754 index = self.index
782 generaldelta = self._generaldelta
755 generaldelta = self._generaldelta
783
756
784 iterrev = rev
757 iterrev = rev
785 e = index[iterrev]
758 e = index[iterrev]
786 while iterrev != e[3] and iterrev != stoprev:
759 while iterrev != e[3] and iterrev != stoprev:
787 chain.append(iterrev)
760 chain.append(iterrev)
788 if generaldelta:
761 if generaldelta:
789 iterrev = e[3]
762 iterrev = e[3]
790 else:
763 else:
791 iterrev -= 1
764 iterrev -= 1
792 e = index[iterrev]
765 e = index[iterrev]
793
766
794 if iterrev == stoprev:
767 if iterrev == stoprev:
795 stopped = True
768 stopped = True
796 else:
769 else:
797 chain.append(iterrev)
770 chain.append(iterrev)
798 stopped = False
771 stopped = False
799
772
800 chain.reverse()
773 chain.reverse()
801 return chain, stopped
774 return chain, stopped
802
775
803 def ancestors(self, revs, stoprev=0, inclusive=False):
776 def ancestors(self, revs, stoprev=0, inclusive=False):
804 """Generate the ancestors of 'revs' in reverse revision order.
777 """Generate the ancestors of 'revs' in reverse revision order.
805 Does not generate revs lower than stoprev.
778 Does not generate revs lower than stoprev.
806
779
807 See the documentation for ancestor.lazyancestors for more details."""
780 See the documentation for ancestor.lazyancestors for more details."""
808
781
809 # first, make sure start revisions aren't filtered
782 # first, make sure start revisions aren't filtered
810 revs = list(revs)
783 revs = list(revs)
811 checkrev = self.node
784 checkrev = self.node
812 for r in revs:
785 for r in revs:
813 checkrev(r)
786 checkrev(r)
814 # and we're sure ancestors aren't filtered as well
787 # and we're sure ancestors aren't filtered as well
815
788
816 if rustancestor is not None:
789 if rustancestor is not None:
817 lazyancestors = rustancestor.LazyAncestors
790 lazyancestors = rustancestor.LazyAncestors
818 arg = self.index
791 arg = self.index
819 elif util.safehasattr(parsers, 'rustlazyancestors'):
792 elif util.safehasattr(parsers, 'rustlazyancestors'):
820 lazyancestors = ancestor.rustlazyancestors
793 lazyancestors = ancestor.rustlazyancestors
821 arg = self.index
794 arg = self.index
822 else:
795 else:
823 lazyancestors = ancestor.lazyancestors
796 lazyancestors = ancestor.lazyancestors
824 arg = self._uncheckedparentrevs
797 arg = self._uncheckedparentrevs
825 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
798 return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)
826
799
827 def descendants(self, revs):
800 def descendants(self, revs):
828 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
801 return dagop.descendantrevs(revs, self.revs, self.parentrevs)
829
802
830 def findcommonmissing(self, common=None, heads=None):
803 def findcommonmissing(self, common=None, heads=None):
831 """Return a tuple of the ancestors of common and the ancestors of heads
804 """Return a tuple of the ancestors of common and the ancestors of heads
832 that are not ancestors of common. In revset terminology, we return the
805 that are not ancestors of common. In revset terminology, we return the
833 tuple:
806 tuple:
834
807
835 ::common, (::heads) - (::common)
808 ::common, (::heads) - (::common)
836
809
837 The list is sorted by revision number, meaning it is
810 The list is sorted by revision number, meaning it is
838 topologically sorted.
811 topologically sorted.
839
812
840 'heads' and 'common' are both lists of node IDs. If heads is
813 'heads' and 'common' are both lists of node IDs. If heads is
841 not supplied, uses all of the revlog's heads. If common is not
814 not supplied, uses all of the revlog's heads. If common is not
842 supplied, uses nullid."""
815 supplied, uses nullid."""
843 if common is None:
816 if common is None:
844 common = [nullid]
817 common = [nullid]
845 if heads is None:
818 if heads is None:
846 heads = self.heads()
819 heads = self.heads()
847
820
848 common = [self.rev(n) for n in common]
821 common = [self.rev(n) for n in common]
849 heads = [self.rev(n) for n in heads]
822 heads = [self.rev(n) for n in heads]
850
823
851 # we want the ancestors, but inclusive
824 # we want the ancestors, but inclusive
852 class lazyset(object):
825 class lazyset(object):
853 def __init__(self, lazyvalues):
826 def __init__(self, lazyvalues):
854 self.addedvalues = set()
827 self.addedvalues = set()
855 self.lazyvalues = lazyvalues
828 self.lazyvalues = lazyvalues
856
829
857 def __contains__(self, value):
830 def __contains__(self, value):
858 return value in self.addedvalues or value in self.lazyvalues
831 return value in self.addedvalues or value in self.lazyvalues
859
832
860 def __iter__(self):
833 def __iter__(self):
861 added = self.addedvalues
834 added = self.addedvalues
862 for r in added:
835 for r in added:
863 yield r
836 yield r
864 for r in self.lazyvalues:
837 for r in self.lazyvalues:
865 if not r in added:
838 if not r in added:
866 yield r
839 yield r
867
840
868 def add(self, value):
841 def add(self, value):
869 self.addedvalues.add(value)
842 self.addedvalues.add(value)
870
843
871 def update(self, values):
844 def update(self, values):
872 self.addedvalues.update(values)
845 self.addedvalues.update(values)
873
846
874 has = lazyset(self.ancestors(common))
847 has = lazyset(self.ancestors(common))
875 has.add(nullrev)
848 has.add(nullrev)
876 has.update(common)
849 has.update(common)
877
850
878 # take all ancestors from heads that aren't in has
851 # take all ancestors from heads that aren't in has
879 missing = set()
852 missing = set()
880 visit = collections.deque(r for r in heads if r not in has)
853 visit = collections.deque(r for r in heads if r not in has)
881 while visit:
854 while visit:
882 r = visit.popleft()
855 r = visit.popleft()
883 if r in missing:
856 if r in missing:
884 continue
857 continue
885 else:
858 else:
886 missing.add(r)
859 missing.add(r)
887 for p in self.parentrevs(r):
860 for p in self.parentrevs(r):
888 if p not in has:
861 if p not in has:
889 visit.append(p)
862 visit.append(p)
890 missing = list(missing)
863 missing = list(missing)
891 missing.sort()
864 missing.sort()
892 return has, [self.node(miss) for miss in missing]
865 return has, [self.node(miss) for miss in missing]
893
866
894 def incrementalmissingrevs(self, common=None):
867 def incrementalmissingrevs(self, common=None):
895 """Return an object that can be used to incrementally compute the
868 """Return an object that can be used to incrementally compute the
896 revision numbers of the ancestors of arbitrary sets that are not
869 revision numbers of the ancestors of arbitrary sets that are not
897 ancestors of common. This is an ancestor.incrementalmissingancestors
870 ancestors of common. This is an ancestor.incrementalmissingancestors
898 object.
871 object.
899
872
900 'common' is a list of revision numbers. If common is not supplied, uses
873 'common' is a list of revision numbers. If common is not supplied, uses
901 nullrev.
874 nullrev.
902 """
875 """
903 if common is None:
876 if common is None:
904 common = [nullrev]
877 common = [nullrev]
905
878
906 if rustancestor is not None:
879 if rustancestor is not None:
907 return rustancestor.MissingAncestors(self.index, common)
880 return rustancestor.MissingAncestors(self.index, common)
908 return ancestor.incrementalmissingancestors(self.parentrevs, common)
881 return ancestor.incrementalmissingancestors(self.parentrevs, common)
909
882
910 def findmissingrevs(self, common=None, heads=None):
883 def findmissingrevs(self, common=None, heads=None):
911 """Return the revision numbers of the ancestors of heads that
884 """Return the revision numbers of the ancestors of heads that
912 are not ancestors of common.
885 are not ancestors of common.
913
886
914 More specifically, return a list of revision numbers corresponding to
887 More specifically, return a list of revision numbers corresponding to
915 nodes N such that every N satisfies the following constraints:
888 nodes N such that every N satisfies the following constraints:
916
889
917 1. N is an ancestor of some node in 'heads'
890 1. N is an ancestor of some node in 'heads'
918 2. N is not an ancestor of any node in 'common'
891 2. N is not an ancestor of any node in 'common'
919
892
920 The list is sorted by revision number, meaning it is
893 The list is sorted by revision number, meaning it is
921 topologically sorted.
894 topologically sorted.
922
895
923 'heads' and 'common' are both lists of revision numbers. If heads is
896 'heads' and 'common' are both lists of revision numbers. If heads is
924 not supplied, uses all of the revlog's heads. If common is not
897 not supplied, uses all of the revlog's heads. If common is not
925 supplied, uses nullid."""
898 supplied, uses nullid."""
926 if common is None:
899 if common is None:
927 common = [nullrev]
900 common = [nullrev]
928 if heads is None:
901 if heads is None:
929 heads = self.headrevs()
902 heads = self.headrevs()
930
903
931 inc = self.incrementalmissingrevs(common=common)
904 inc = self.incrementalmissingrevs(common=common)
932 return inc.missingancestors(heads)
905 return inc.missingancestors(heads)
933
906
934 def findmissing(self, common=None, heads=None):
907 def findmissing(self, common=None, heads=None):
935 """Return the ancestors of heads that are not ancestors of common.
908 """Return the ancestors of heads that are not ancestors of common.
936
909
937 More specifically, return a list of nodes N such that every N
910 More specifically, return a list of nodes N such that every N
938 satisfies the following constraints:
911 satisfies the following constraints:
939
912
940 1. N is an ancestor of some node in 'heads'
913 1. N is an ancestor of some node in 'heads'
941 2. N is not an ancestor of any node in 'common'
914 2. N is not an ancestor of any node in 'common'
942
915
943 The list is sorted by revision number, meaning it is
916 The list is sorted by revision number, meaning it is
944 topologically sorted.
917 topologically sorted.
945
918
946 'heads' and 'common' are both lists of node IDs. If heads is
919 'heads' and 'common' are both lists of node IDs. If heads is
947 not supplied, uses all of the revlog's heads. If common is not
920 not supplied, uses all of the revlog's heads. If common is not
948 supplied, uses nullid."""
921 supplied, uses nullid."""
949 if common is None:
922 if common is None:
950 common = [nullid]
923 common = [nullid]
951 if heads is None:
924 if heads is None:
952 heads = self.heads()
925 heads = self.heads()
953
926
954 common = [self.rev(n) for n in common]
927 common = [self.rev(n) for n in common]
955 heads = [self.rev(n) for n in heads]
928 heads = [self.rev(n) for n in heads]
956
929
957 inc = self.incrementalmissingrevs(common=common)
930 inc = self.incrementalmissingrevs(common=common)
958 return [self.node(r) for r in inc.missingancestors(heads)]
931 return [self.node(r) for r in inc.missingancestors(heads)]
959
932
960 def nodesbetween(self, roots=None, heads=None):
933 def nodesbetween(self, roots=None, heads=None):
961 """Return a topological path from 'roots' to 'heads'.
934 """Return a topological path from 'roots' to 'heads'.
962
935
963 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
936 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
964 topologically sorted list of all nodes N that satisfy both of
937 topologically sorted list of all nodes N that satisfy both of
965 these constraints:
938 these constraints:
966
939
967 1. N is a descendant of some node in 'roots'
940 1. N is a descendant of some node in 'roots'
968 2. N is an ancestor of some node in 'heads'
941 2. N is an ancestor of some node in 'heads'
969
942
970 Every node is considered to be both a descendant and an ancestor
943 Every node is considered to be both a descendant and an ancestor
971 of itself, so every reachable node in 'roots' and 'heads' will be
944 of itself, so every reachable node in 'roots' and 'heads' will be
972 included in 'nodes'.
945 included in 'nodes'.
973
946
974 'outroots' is the list of reachable nodes in 'roots', i.e., the
947 'outroots' is the list of reachable nodes in 'roots', i.e., the
975 subset of 'roots' that is returned in 'nodes'. Likewise,
948 subset of 'roots' that is returned in 'nodes'. Likewise,
976 'outheads' is the subset of 'heads' that is also in 'nodes'.
949 'outheads' is the subset of 'heads' that is also in 'nodes'.
977
950
978 'roots' and 'heads' are both lists of node IDs. If 'roots' is
951 'roots' and 'heads' are both lists of node IDs. If 'roots' is
979 unspecified, uses nullid as the only root. If 'heads' is
952 unspecified, uses nullid as the only root. If 'heads' is
980 unspecified, uses list of all of the revlog's heads."""
953 unspecified, uses list of all of the revlog's heads."""
981 nonodes = ([], [], [])
954 nonodes = ([], [], [])
982 if roots is not None:
955 if roots is not None:
983 roots = list(roots)
956 roots = list(roots)
984 if not roots:
957 if not roots:
985 return nonodes
958 return nonodes
986 lowestrev = min([self.rev(n) for n in roots])
959 lowestrev = min([self.rev(n) for n in roots])
987 else:
960 else:
988 roots = [nullid] # Everybody's a descendant of nullid
961 roots = [nullid] # Everybody's a descendant of nullid
989 lowestrev = nullrev
962 lowestrev = nullrev
990 if (lowestrev == nullrev) and (heads is None):
963 if (lowestrev == nullrev) and (heads is None):
991 # We want _all_ the nodes!
964 # We want _all_ the nodes!
992 return ([self.node(r) for r in self], [nullid], list(self.heads()))
965 return ([self.node(r) for r in self], [nullid], list(self.heads()))
993 if heads is None:
966 if heads is None:
994 # All nodes are ancestors, so the latest ancestor is the last
967 # All nodes are ancestors, so the latest ancestor is the last
995 # node.
968 # node.
996 highestrev = len(self) - 1
969 highestrev = len(self) - 1
997 # Set ancestors to None to signal that every node is an ancestor.
970 # Set ancestors to None to signal that every node is an ancestor.
998 ancestors = None
971 ancestors = None
999 # Set heads to an empty dictionary for later discovery of heads
972 # Set heads to an empty dictionary for later discovery of heads
1000 heads = {}
973 heads = {}
1001 else:
974 else:
1002 heads = list(heads)
975 heads = list(heads)
1003 if not heads:
976 if not heads:
1004 return nonodes
977 return nonodes
1005 ancestors = set()
978 ancestors = set()
1006 # Turn heads into a dictionary so we can remove 'fake' heads.
979 # Turn heads into a dictionary so we can remove 'fake' heads.
1007 # Also, later we will be using it to filter out the heads we can't
980 # Also, later we will be using it to filter out the heads we can't
1008 # find from roots.
981 # find from roots.
1009 heads = dict.fromkeys(heads, False)
982 heads = dict.fromkeys(heads, False)
1010 # Start at the top and keep marking parents until we're done.
983 # Start at the top and keep marking parents until we're done.
1011 nodestotag = set(heads)
984 nodestotag = set(heads)
1012 # Remember where the top was so we can use it as a limit later.
985 # Remember where the top was so we can use it as a limit later.
1013 highestrev = max([self.rev(n) for n in nodestotag])
986 highestrev = max([self.rev(n) for n in nodestotag])
1014 while nodestotag:
987 while nodestotag:
1015 # grab a node to tag
988 # grab a node to tag
1016 n = nodestotag.pop()
989 n = nodestotag.pop()
1017 # Never tag nullid
990 # Never tag nullid
1018 if n == nullid:
991 if n == nullid:
1019 continue
992 continue
1020 # A node's revision number represents its place in a
993 # A node's revision number represents its place in a
1021 # topologically sorted list of nodes.
994 # topologically sorted list of nodes.
1022 r = self.rev(n)
995 r = self.rev(n)
1023 if r >= lowestrev:
996 if r >= lowestrev:
1024 if n not in ancestors:
997 if n not in ancestors:
1025 # If we are possibly a descendant of one of the roots
998 # If we are possibly a descendant of one of the roots
1026 # and we haven't already been marked as an ancestor
999 # and we haven't already been marked as an ancestor
1027 ancestors.add(n) # Mark as ancestor
1000 ancestors.add(n) # Mark as ancestor
1028 # Add non-nullid parents to list of nodes to tag.
1001 # Add non-nullid parents to list of nodes to tag.
1029 nodestotag.update([p for p in self.parents(n) if
1002 nodestotag.update([p for p in self.parents(n) if
1030 p != nullid])
1003 p != nullid])
1031 elif n in heads: # We've seen it before, is it a fake head?
1004 elif n in heads: # We've seen it before, is it a fake head?
1032 # So it is, real heads should not be the ancestors of
1005 # So it is, real heads should not be the ancestors of
1033 # any other heads.
1006 # any other heads.
1034 heads.pop(n)
1007 heads.pop(n)
1035 if not ancestors:
1008 if not ancestors:
1036 return nonodes
1009 return nonodes
1037 # Now that we have our set of ancestors, we want to remove any
1010 # Now that we have our set of ancestors, we want to remove any
1038 # roots that are not ancestors.
1011 # roots that are not ancestors.
1039
1012
1040 # If one of the roots was nullid, everything is included anyway.
1013 # If one of the roots was nullid, everything is included anyway.
1041 if lowestrev > nullrev:
1014 if lowestrev > nullrev:
1042 # But, since we weren't, let's recompute the lowest rev to not
1015 # But, since we weren't, let's recompute the lowest rev to not
1043 # include roots that aren't ancestors.
1016 # include roots that aren't ancestors.
1044
1017
1045 # Filter out roots that aren't ancestors of heads
1018 # Filter out roots that aren't ancestors of heads
1046 roots = [root for root in roots if root in ancestors]
1019 roots = [root for root in roots if root in ancestors]
1047 # Recompute the lowest revision
1020 # Recompute the lowest revision
1048 if roots:
1021 if roots:
1049 lowestrev = min([self.rev(root) for root in roots])
1022 lowestrev = min([self.rev(root) for root in roots])
1050 else:
1023 else:
1051 # No more roots? Return empty list
1024 # No more roots? Return empty list
1052 return nonodes
1025 return nonodes
1053 else:
1026 else:
1054 # We are descending from nullid, and don't need to care about
1027 # We are descending from nullid, and don't need to care about
1055 # any other roots.
1028 # any other roots.
1056 lowestrev = nullrev
1029 lowestrev = nullrev
1057 roots = [nullid]
1030 roots = [nullid]
1058 # Transform our roots list into a set.
1031 # Transform our roots list into a set.
1059 descendants = set(roots)
1032 descendants = set(roots)
1060 # Also, keep the original roots so we can filter out roots that aren't
1033 # Also, keep the original roots so we can filter out roots that aren't
1061 # 'real' roots (i.e. are descended from other roots).
1034 # 'real' roots (i.e. are descended from other roots).
1062 roots = descendants.copy()
1035 roots = descendants.copy()
1063 # Our topologically sorted list of output nodes.
1036 # Our topologically sorted list of output nodes.
1064 orderedout = []
1037 orderedout = []
1065 # Don't start at nullid since we don't want nullid in our output list,
1038 # Don't start at nullid since we don't want nullid in our output list,
1066 # and if nullid shows up in descendants, empty parents will look like
1039 # and if nullid shows up in descendants, empty parents will look like
1067 # they're descendants.
1040 # they're descendants.
1068 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1041 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
1069 n = self.node(r)
1042 n = self.node(r)
1070 isdescendant = False
1043 isdescendant = False
1071 if lowestrev == nullrev: # Everybody is a descendant of nullid
1044 if lowestrev == nullrev: # Everybody is a descendant of nullid
1072 isdescendant = True
1045 isdescendant = True
1073 elif n in descendants:
1046 elif n in descendants:
1074 # n is already a descendant
1047 # n is already a descendant
1075 isdescendant = True
1048 isdescendant = True
1076 # This check only needs to be done here because all the roots
1049 # This check only needs to be done here because all the roots
1077 # will start being marked is descendants before the loop.
1050 # will start being marked is descendants before the loop.
1078 if n in roots:
1051 if n in roots:
1079 # If n was a root, check if it's a 'real' root.
1052 # If n was a root, check if it's a 'real' root.
1080 p = tuple(self.parents(n))
1053 p = tuple(self.parents(n))
1081 # If any of its parents are descendants, it's not a root.
1054 # If any of its parents are descendants, it's not a root.
1082 if (p[0] in descendants) or (p[1] in descendants):
1055 if (p[0] in descendants) or (p[1] in descendants):
1083 roots.remove(n)
1056 roots.remove(n)
1084 else:
1057 else:
1085 p = tuple(self.parents(n))
1058 p = tuple(self.parents(n))
1086 # A node is a descendant if either of its parents are
1059 # A node is a descendant if either of its parents are
1087 # descendants. (We seeded the dependents list with the roots
1060 # descendants. (We seeded the dependents list with the roots
1088 # up there, remember?)
1061 # up there, remember?)
1089 if (p[0] in descendants) or (p[1] in descendants):
1062 if (p[0] in descendants) or (p[1] in descendants):
1090 descendants.add(n)
1063 descendants.add(n)
1091 isdescendant = True
1064 isdescendant = True
1092 if isdescendant and ((ancestors is None) or (n in ancestors)):
1065 if isdescendant and ((ancestors is None) or (n in ancestors)):
1093 # Only include nodes that are both descendants and ancestors.
1066 # Only include nodes that are both descendants and ancestors.
1094 orderedout.append(n)
1067 orderedout.append(n)
1095 if (ancestors is not None) and (n in heads):
1068 if (ancestors is not None) and (n in heads):
1096 # We're trying to figure out which heads are reachable
1069 # We're trying to figure out which heads are reachable
1097 # from roots.
1070 # from roots.
1098 # Mark this head as having been reached
1071 # Mark this head as having been reached
1099 heads[n] = True
1072 heads[n] = True
1100 elif ancestors is None:
1073 elif ancestors is None:
1101 # Otherwise, we're trying to discover the heads.
1074 # Otherwise, we're trying to discover the heads.
1102 # Assume this is a head because if it isn't, the next step
1075 # Assume this is a head because if it isn't, the next step
1103 # will eventually remove it.
1076 # will eventually remove it.
1104 heads[n] = True
1077 heads[n] = True
1105 # But, obviously its parents aren't.
1078 # But, obviously its parents aren't.
1106 for p in self.parents(n):
1079 for p in self.parents(n):
1107 heads.pop(p, None)
1080 heads.pop(p, None)
1108 heads = [head for head, flag in heads.iteritems() if flag]
1081 heads = [head for head, flag in heads.iteritems() if flag]
1109 roots = list(roots)
1082 roots = list(roots)
1110 assert orderedout
1083 assert orderedout
1111 assert roots
1084 assert roots
1112 assert heads
1085 assert heads
1113 return (orderedout, roots, heads)
1086 return (orderedout, roots, heads)
1114
1087
1115 def headrevs(self, revs=None):
1088 def headrevs(self, revs=None):
1116 if revs is None:
1089 if revs is None:
1117 try:
1090 try:
1118 return self.index.headrevs()
1091 return self.index.headrevs()
1119 except AttributeError:
1092 except AttributeError:
1120 return self._headrevs()
1093 return self._headrevs()
1121 if rustdagop is not None:
1094 if rustdagop is not None:
1122 return rustdagop.headrevs(self.index, revs)
1095 return rustdagop.headrevs(self.index, revs)
1123 return dagop.headrevs(revs, self._uncheckedparentrevs)
1096 return dagop.headrevs(revs, self._uncheckedparentrevs)
1124
1097
    def computephases(self, roots):
        """Compute phase information starting from ``roots``.

        Thin wrapper delegating to the C index's
        ``computephasesmapsets``; argument and return semantics are
        defined by that implementation.
        """
        return self.index.computephasesmapsets(roots)
1127
1100
1128 def _headrevs(self):
1101 def _headrevs(self):
1129 count = len(self)
1102 count = len(self)
1130 if not count:
1103 if not count:
1131 return [nullrev]
1104 return [nullrev]
1132 # we won't iter over filtered rev so nobody is a head at start
1105 # we won't iter over filtered rev so nobody is a head at start
1133 ishead = [0] * (count + 1)
1106 ishead = [0] * (count + 1)
1134 index = self.index
1107 index = self.index
1135 for r in self:
1108 for r in self:
1136 ishead[r] = 1 # I may be an head
1109 ishead[r] = 1 # I may be an head
1137 e = index[r]
1110 e = index[r]
1138 ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
1111 ishead[e[5]] = ishead[e[6]] = 0 # my parent are not
1139 return [r for r, val in enumerate(ishead) if val]
1112 return [r for r, val in enumerate(ishead) if val]
1140
1113
    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, it will consider all the revs from stop
        as if they had no children
        """
        if start is None and stop is None:
            # fast path: unconstrained query answered via headrevs()
            if not len(self):
                # empty revlog: by convention the null node is the head
                return [nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            # no lower bound: consider the whole history
            start = nullrev
        else:
            start = self.rev(start)

        # nodes in ``stop`` are treated as if they had no children
        stoprevs = set(self.rev(n) for n in stop or [])

        revs = dagop.headrevssubset(self.revs, self.parentrevs, startrev=start,
                                    stoprevs=stoprevs)

        return [self.node(rev) for rev in revs]
1165
1138
1166 def children(self, node):
1139 def children(self, node):
1167 """find the children of a given node"""
1140 """find the children of a given node"""
1168 c = []
1141 c = []
1169 p = self.rev(node)
1142 p = self.rev(node)
1170 for r in self.revs(start=p + 1):
1143 for r in self.revs(start=p + 1):
1171 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1144 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
1172 if prevs:
1145 if prevs:
1173 for pr in prevs:
1146 for pr in prevs:
1174 if pr == p:
1147 if pr == p:
1175 c.append(self.node(r))
1148 c.append(self.node(r))
1176 elif p == nullrev:
1149 elif p == nullrev:
1177 c.append(self.node(r))
1150 c.append(self.node(r))
1178 return c
1151 return c
1179
1152
    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b

        Both arguments are nodeids; the computation itself is done on
        revision numbers by ``_commonancestorsheads`` and the result is
        mapped back to a list of nodeids.
        """
        a, b = self.rev(a), self.rev(b)
        ancs = self._commonancestorsheads(a, b)
        return pycompat.maplist(self.node, ancs)
1185
1158
    def _commonancestorsheads(self, *revs):
        """calculate all the heads of the common ancestors of revs

        Prefers the C index implementation and falls back to the pure
        Python ``ancestor`` module when it is unavailable or overflows.
        """
        try:
            ancs = self.index.commonancestorsheads(*revs)
        except (AttributeError, OverflowError): # C implementation failed
            ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
        return ancs
1193
1166
1194 def isancestor(self, a, b):
1167 def isancestor(self, a, b):
1195 """return True if node a is an ancestor of node b
1168 """return True if node a is an ancestor of node b
1196
1169
1197 A revision is considered an ancestor of itself."""
1170 A revision is considered an ancestor of itself."""
1198 a, b = self.rev(a), self.rev(b)
1171 a, b = self.rev(a), self.rev(b)
1199 return self.isancestorrev(a, b)
1172 return self.isancestorrev(a, b)
1200
1173
1201 def isancestorrev(self, a, b):
1174 def isancestorrev(self, a, b):
1202 """return True if revision a is an ancestor of revision b
1175 """return True if revision a is an ancestor of revision b
1203
1176
1204 A revision is considered an ancestor of itself.
1177 A revision is considered an ancestor of itself.
1205
1178
1206 The implementation of this is trivial but the use of
1179 The implementation of this is trivial but the use of
1207 reachableroots is not."""
1180 reachableroots is not."""
1208 if a == nullrev:
1181 if a == nullrev:
1209 return True
1182 return True
1210 elif a == b:
1183 elif a == b:
1211 return True
1184 return True
1212 elif a > b:
1185 elif a > b:
1213 return False
1186 return False
1214 return bool(self.reachableroots(a, [b], [a], includepath=False))
1187 return bool(self.reachableroots(a, [b], [a], includepath=False))
1215
1188
    def reachableroots(self, minroot, heads, roots, includepath=False):
        """return (heads(::<roots> and <roots>::<heads>))

        If includepath is True, return (<roots>::<heads>)."""
        try:
            # prefer the C index implementation when available
            return self.index.reachableroots2(minroot, heads, roots,
                                              includepath)
        except AttributeError:
            # pure Python fallback; note that it takes roots before
            # heads, unlike the C entry point above
            return dagop._reachablerootspure(self.parentrevs,
                                             minroot, roots, heads, includepath)
1226
1199
    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""

        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            # C implementation missing or failed; use the pure Python one
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return nullid
1239
1212
    def _match(self, id):
        """Try to resolve ``id`` to a node using exact forms only.

        ``id`` may be an integer revision, a 20-byte binary node, the
        string form of a (possibly negative) revision number, or a
        40-character hex nodeid.  Each interpretation is tried in turn;
        returns the node, or None when none of them matches.
        """
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except error.LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            # reject strings like '01' whose int() form does not
            # round-trip to the original text
            if "%d" % rev != id:
                raise ValueError
            if rev < 0:
                # negative revisions count from the end
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, error.LookupError):
                pass
1273
1246
    def _partialmatch(self, id):
        """Resolve the hex-nodeid prefix ``id`` to a node.

        Returns the matching node, None when nothing matches, raises
        AmbiguousPrefixLookupError on ambiguity and WdirUnsupported when
        the prefix can only denote the working directory id.  Tries the
        C radix tree first, then a cached/linear pure-Python scan.
        """
        # we don't care wdirfilenodeids as they should be always full hash
        maybewdir = wdirhex.startswith(id)
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    raise error.RevlogError
                return partial
            if maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            return None
        except error.RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                raise error.AmbiguousPrefixLookupError(
                    id, self.indexfile, _('ambiguous identifier'))
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key was too short to search radix tree
            pass

        # consult the prefix cache filled by earlier slow-path lookups
        if id in self._pcache:
            return self._pcache[id]

        if len(id) <= 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2 # grab an even number of digits
                prefix = bin(id[:l * 2])
                # coarse filter on the binary prefix, then exact filter
                # on the full hex form (handles odd-length prefixes)
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [n for n in nl if hex(n).startswith(id) and
                      self.hasnode(n)]
                if nullhex.startswith(id):
                    nl.append(nullid)
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        # unambiguous: remember it for next time
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise error.AmbiguousPrefixLookupError(
                        id, self.indexfile, _('ambiguous identifier'))
                if maybewdir:
                    raise error.WdirUnsupported
                return None
            except TypeError:
                pass
1323
1296
1324 def lookup(self, id):
1297 def lookup(self, id):
1325 """locate a node based on:
1298 """locate a node based on:
1326 - revision number or str(revision number)
1299 - revision number or str(revision number)
1327 - nodeid or subset of hex nodeid
1300 - nodeid or subset of hex nodeid
1328 """
1301 """
1329 n = self._match(id)
1302 n = self._match(id)
1330 if n is not None:
1303 if n is not None:
1331 return n
1304 return n
1332 n = self._partialmatch(id)
1305 n = self._partialmatch(id)
1333 if n:
1306 if n:
1334 return n
1307 return n
1335
1308
1336 raise error.LookupError(id, self.indexfile, _('no match found'))
1309 raise error.LookupError(id, self.indexfile, _('no match found'))
1337
1310
    def shortest(self, node, minlength=1):
        """Find the shortest unambiguous prefix that matches node."""
        def isvalid(prefix):
            # a prefix is valid when it resolves to exactly one node
            try:
                matchednode = self._partialmatch(prefix)
            except error.AmbiguousPrefixLookupError:
                return False
            except error.WdirUnsupported:
                # single 'ff...' match
                return True
            if matchednode is None:
                raise error.LookupError(node, self.indexfile, _('no node'))
            return True

        def maybewdir(prefix):
            # an all-'f' prefix could also denote the working directory
            return all(c == 'f' for c in pycompat.iterbytestr(prefix))

        hexnode = hex(node)

        def disambiguate(hexnode, minlength):
            """Disambiguate against wdirid."""
            # extend the prefix until it can no longer be mistaken for
            # the working directory id
            for length in range(minlength, 41):
                prefix = hexnode[:length]
                if not maybewdir(prefix):
                    return prefix

        if not getattr(self, 'filteredrevs', None):
            # fast path: let the C index compute the shortest length
            try:
                length = max(self.index.shortest(node), minlength)
                return disambiguate(hexnode, length)
            except error.RevlogError:
                if node != wdirid:
                    raise error.LookupError(node, self.indexfile, _('no node'))
            except AttributeError:
                # Fall through to pure code
                pass

        if node == wdirid:
            # wdirid itself: the first valid prefix is the answer
            for length in range(minlength, 41):
                prefix = hexnode[:length]
                if isvalid(prefix):
                    return prefix

        # slow path: probe increasing prefix lengths
        for length in range(minlength, 41):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return disambiguate(hexnode, length)
1385
1358
1386 def cmp(self, node, text):
1359 def cmp(self, node, text):
1387 """compare text with a given file revision
1360 """compare text with a given file revision
1388
1361
1389 returns True if text is different than what is stored.
1362 returns True if text is different than what is stored.
1390 """
1363 """
1391 p1, p2 = self.parents(node)
1364 p1, p2 = self.parents(node)
1392 return storageutil.hashrevisionsha1(text, p1, p2) != node
1365 return storageutil.hashrevisionsha1(text, p1, p2) != node
1393
1366
1394 def _cachesegment(self, offset, data):
1367 def _cachesegment(self, offset, data):
1395 """Add a segment to the revlog cache.
1368 """Add a segment to the revlog cache.
1396
1369
1397 Accepts an absolute offset and the data that is at that location.
1370 Accepts an absolute offset and the data that is at that location.
1398 """
1371 """
1399 o, d = self._chunkcache
1372 o, d = self._chunkcache
1400 # try to add to existing cache
1373 # try to add to existing cache
1401 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1374 if o + len(d) == offset and len(d) + len(data) < _chunksize:
1402 self._chunkcache = o, d + data
1375 self._chunkcache = o, d + data
1403 else:
1376 else:
1404 self._chunkcache = offset, data
1377 self._chunkcache = offset, data
1405
1378
    def _readsegment(self, offset, length, df=None):
        """Load a segment of raw data from the revlog.

        Accepts an absolute offset, length to read, and an optional existing
        file handle to read from.

        If an existing file handle is passed, it will be seeked and the
        original seek position will NOT be restored.

        Returns a str or buffer of raw byte data.

        Raises if the requested number of bytes could not be read.
        """
        # Cache data both forward and backward around the requested
        # data, in a fixed size window. This helps speed up operations
        # involving reading the revlog backwards.
        cachesize = self._chunkcachesize
        # round the window down/up to cachesize-aligned boundaries
        # (cachesize is assumed to be a power of two for the masking)
        realoffset = offset & ~(cachesize - 1)
        reallength = (((offset + length + cachesize) & ~(cachesize - 1))
                      - realoffset)
        with self._datareadfp(df) as df:
            df.seek(realoffset)
            d = df.read(reallength)

        # remember the whole window for future nearby reads
        self._cachesegment(realoffset, d)
        if offset != realoffset or reallength != length:
            # the caller asked for a sub-span of the window: slice it out
            startoffset = offset - realoffset
            if len(d) - startoffset < length:
                raise error.RevlogError(
                    _('partial read of revlog %s; expected %d bytes from '
                      'offset %d, got %d') %
                    (self.indexfile if self._inline else self.datafile,
                     length, realoffset, len(d) - startoffset))

            return util.buffer(d, startoffset, length)

        if len(d) < length:
            raise error.RevlogError(
                _('partial read of revlog %s; expected %d bytes from offset '
                  '%d, got %d') %
                (self.indexfile if self._inline else self.datafile,
                 length, offset, len(d)))

        return d
1450
1423
1451 def _getsegment(self, offset, length, df=None):
1424 def _getsegment(self, offset, length, df=None):
1452 """Obtain a segment of raw data from the revlog.
1425 """Obtain a segment of raw data from the revlog.
1453
1426
1454 Accepts an absolute offset, length of bytes to obtain, and an
1427 Accepts an absolute offset, length of bytes to obtain, and an
1455 optional file handle to the already-opened revlog. If the file
1428 optional file handle to the already-opened revlog. If the file
1456 handle is used, it's original seek position will not be preserved.
1429 handle is used, it's original seek position will not be preserved.
1457
1430
1458 Requests for data may be returned from a cache.
1431 Requests for data may be returned from a cache.
1459
1432
1460 Returns a str or a buffer instance of raw byte data.
1433 Returns a str or a buffer instance of raw byte data.
1461 """
1434 """
1462 o, d = self._chunkcache
1435 o, d = self._chunkcache
1463 l = len(d)
1436 l = len(d)
1464
1437
1465 # is it in the cache?
1438 # is it in the cache?
1466 cachestart = offset - o
1439 cachestart = offset - o
1467 cacheend = cachestart + length
1440 cacheend = cachestart + length
1468 if cachestart >= 0 and cacheend <= l:
1441 if cachestart >= 0 and cacheend <= l:
1469 if cachestart == 0 and cacheend == l:
1442 if cachestart == 0 and cacheend == l:
1470 return d # avoid a copy
1443 return d # avoid a copy
1471 return util.buffer(d, cachestart, cacheend - cachestart)
1444 return util.buffer(d, cachestart, cacheend - cachestart)
1472
1445
1473 return self._readsegment(offset, length, df=df)
1446 return self._readsegment(offset, length, df=df)
1474
1447
    def _getsegmentforrevs(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is read, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        # the data offset lives in the high bits of entry[0];
        # entry[1] is the compressed length
        start = int(istart[0] >> 16)
        if startrev == endrev:
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

        if self._inline:
            # inline revlogs interleave index entries with data, so
            # shift by the size of the index records that precede each
            # revision's data
            start += (startrev + 1) * self._io.size
            end += (endrev + 1) * self._io.size
        length = end - start

        return start, self._getsegment(start, length, df=df)
1508
1481
    def _chunk(self, rev, df=None):
        """Obtain a single decompressed chunk for a revision.

        Accepts an integer revision and an optional already-open file handle
        to be used for reading. If used, the seek position of the file will not
        be preserved.

        Returns a str holding uncompressed data for the requested revision.
        """
        # [1] picks the data part of the (offset, data) tuple returned
        # by _getsegmentforrevs
        return self.decompress(self._getsegmentforrevs(rev, rev, df=df)[1])
1519
1492
1520 def _chunks(self, revs, df=None, targetsize=None):
1493 def _chunks(self, revs, df=None, targetsize=None):
1521 """Obtain decompressed chunks for the specified revisions.
1494 """Obtain decompressed chunks for the specified revisions.
1522
1495
1523 Accepts an iterable of numeric revisions that are assumed to be in
1496 Accepts an iterable of numeric revisions that are assumed to be in
1524 ascending order. Also accepts an optional already-open file handle
1497 ascending order. Also accepts an optional already-open file handle
1525 to be used for reading. If used, the seek position of the file will
1498 to be used for reading. If used, the seek position of the file will
1526 not be preserved.
1499 not be preserved.
1527
1500
1528 This function is similar to calling ``self._chunk()`` multiple times,
1501 This function is similar to calling ``self._chunk()`` multiple times,
1529 but is faster.
1502 but is faster.
1530
1503
1531 Returns a list with decompressed data for each requested revision.
1504 Returns a list with decompressed data for each requested revision.
1532 """
1505 """
1533 if not revs:
1506 if not revs:
1534 return []
1507 return []
1535 start = self.start
1508 start = self.start
1536 length = self.length
1509 length = self.length
1537 inline = self._inline
1510 inline = self._inline
1538 iosize = self._io.size
1511 iosize = self._io.size
1539 buffer = util.buffer
1512 buffer = util.buffer
1540
1513
1541 l = []
1514 l = []
1542 ladd = l.append
1515 ladd = l.append
1543
1516
1544 if not self._withsparseread:
1517 if not self._withsparseread:
1545 slicedchunks = (revs,)
1518 slicedchunks = (revs,)
1546 else:
1519 else:
1547 slicedchunks = deltautil.slicechunk(self, revs,
1520 slicedchunks = deltautil.slicechunk(self, revs,
1548 targetsize=targetsize)
1521 targetsize=targetsize)
1549
1522
1550 for revschunk in slicedchunks:
1523 for revschunk in slicedchunks:
1551 firstrev = revschunk[0]
1524 firstrev = revschunk[0]
1552 # Skip trailing revisions with empty diff
1525 # Skip trailing revisions with empty diff
1553 for lastrev in revschunk[::-1]:
1526 for lastrev in revschunk[::-1]:
1554 if length(lastrev) != 0:
1527 if length(lastrev) != 0:
1555 break
1528 break
1556
1529
1557 try:
1530 try:
1558 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1531 offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
1559 except OverflowError:
1532 except OverflowError:
1560 # issue4215 - we can't cache a run of chunks greater than
1533 # issue4215 - we can't cache a run of chunks greater than
1561 # 2G on Windows
1534 # 2G on Windows
1562 return [self._chunk(rev, df=df) for rev in revschunk]
1535 return [self._chunk(rev, df=df) for rev in revschunk]
1563
1536
1564 decomp = self.decompress
1537 decomp = self.decompress
1565 for rev in revschunk:
1538 for rev in revschunk:
1566 chunkstart = start(rev)
1539 chunkstart = start(rev)
1567 if inline:
1540 if inline:
1568 chunkstart += (rev + 1) * iosize
1541 chunkstart += (rev + 1) * iosize
1569 chunklength = length(rev)
1542 chunklength = length(rev)
1570 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1543 ladd(decomp(buffer(data, chunkstart - offset, chunklength)))
1571
1544
1572 return l
1545 return l
1573
1546
1574 def _chunkclear(self):
1547 def _chunkclear(self):
1575 """Clear the raw chunk cache."""
1548 """Clear the raw chunk cache."""
1576 self._chunkcache = (0, '')
1549 self._chunkcache = (0, '')
1577
1550
1578 def deltaparent(self, rev):
1551 def deltaparent(self, rev):
1579 """return deltaparent of the given revision"""
1552 """return deltaparent of the given revision"""
1580 base = self.index[rev][3]
1553 base = self.index[rev][3]
1581 if base == rev:
1554 if base == rev:
1582 return nullrev
1555 return nullrev
1583 elif self._generaldelta:
1556 elif self._generaldelta:
1584 return base
1557 return base
1585 else:
1558 else:
1586 return rev - 1
1559 return rev - 1
1587
1560
1588 def issnapshot(self, rev):
1561 def issnapshot(self, rev):
1589 """tells whether rev is a snapshot
1562 """tells whether rev is a snapshot
1590 """
1563 """
1591 if not self._sparserevlog:
1564 if not self._sparserevlog:
1592 return self.deltaparent(rev) == nullrev
1565 return self.deltaparent(rev) == nullrev
1593 elif util.safehasattr(self.index, 'issnapshot'):
1566 elif util.safehasattr(self.index, 'issnapshot'):
1594 # directly assign the method to cache the testing and access
1567 # directly assign the method to cache the testing and access
1595 self.issnapshot = self.index.issnapshot
1568 self.issnapshot = self.index.issnapshot
1596 return self.issnapshot(rev)
1569 return self.issnapshot(rev)
1597 if rev == nullrev:
1570 if rev == nullrev:
1598 return True
1571 return True
1599 entry = self.index[rev]
1572 entry = self.index[rev]
1600 base = entry[3]
1573 base = entry[3]
1601 if base == rev:
1574 if base == rev:
1602 return True
1575 return True
1603 if base == nullrev:
1576 if base == nullrev:
1604 return True
1577 return True
1605 p1 = entry[5]
1578 p1 = entry[5]
1606 p2 = entry[6]
1579 p2 = entry[6]
1607 if base == p1 or base == p2:
1580 if base == p1 or base == p2:
1608 return False
1581 return False
1609 return self.issnapshot(base)
1582 return self.issnapshot(base)
1610
1583
1611 def snapshotdepth(self, rev):
1584 def snapshotdepth(self, rev):
1612 """number of snapshot in the chain before this one"""
1585 """number of snapshot in the chain before this one"""
1613 if not self.issnapshot(rev):
1586 if not self.issnapshot(rev):
1614 raise error.ProgrammingError('revision %d not a snapshot')
1587 raise error.ProgrammingError('revision %d not a snapshot')
1615 return len(self._deltachain(rev)[0]) - 1
1588 return len(self._deltachain(rev)[0]) - 1
1616
1589
1617 def revdiff(self, rev1, rev2):
1590 def revdiff(self, rev1, rev2):
1618 """return or calculate a delta between two revisions
1591 """return or calculate a delta between two revisions
1619
1592
1620 The delta calculated is in binary form and is intended to be written to
1593 The delta calculated is in binary form and is intended to be written to
1621 revlog data directly. So this function needs raw revision data.
1594 revlog data directly. So this function needs raw revision data.
1622 """
1595 """
1623 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1596 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1624 return bytes(self._chunk(rev2))
1597 return bytes(self._chunk(rev2))
1625
1598
1626 return mdiff.textdiff(self.revision(rev1, raw=True),
1599 return mdiff.textdiff(self.revision(rev1, raw=True),
1627 self.revision(rev2, raw=True))
1600 self.revision(rev2, raw=True))
1628
1601
1629 def revision(self, nodeorrev, _df=None, raw=False):
1602 def revision(self, nodeorrev, _df=None, raw=False):
1630 """return an uncompressed revision of a given node or revision
1603 """return an uncompressed revision of a given node or revision
1631 number.
1604 number.
1632
1605
1633 _df - an existing file handle to read from. (internal-only)
1606 _df - an existing file handle to read from. (internal-only)
1634 raw - an optional argument specifying if the revision data is to be
1607 raw - an optional argument specifying if the revision data is to be
1635 treated as raw data when applying flag transforms. 'raw' should be set
1608 treated as raw data when applying flag transforms. 'raw' should be set
1636 to True when generating changegroups or in debug commands.
1609 to True when generating changegroups or in debug commands.
1637 """
1610 """
1638 return self._revisiondata(nodeorrev, _df, raw=raw)
1611 return self._revisiondata(nodeorrev, _df, raw=raw)
1639
1612
1640 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1613 def _revisiondata(self, nodeorrev, _df=None, raw=False):
1641 if isinstance(nodeorrev, int):
1614 if isinstance(nodeorrev, int):
1642 rev = nodeorrev
1615 rev = nodeorrev
1643 node = self.node(rev)
1616 node = self.node(rev)
1644 else:
1617 else:
1645 node = nodeorrev
1618 node = nodeorrev
1646 rev = None
1619 rev = None
1647
1620
1648 cachedrev = None
1621 cachedrev = None
1649 flags = None
1622 flags = None
1650 rawtext = None
1623 rawtext = None
1651 if node == nullid:
1624 if node == nullid:
1652 return ""
1625 return ""
1653 if self._revisioncache:
1626 if self._revisioncache:
1654 if self._revisioncache[0] == node:
1627 if self._revisioncache[0] == node:
1655 # _cache only stores rawtext
1628 # _cache only stores rawtext
1656 if raw:
1629 if raw:
1657 return self._revisioncache[2]
1630 return self._revisioncache[2]
1658 # duplicated, but good for perf
1631 # duplicated, but good for perf
1659 if rev is None:
1632 if rev is None:
1660 rev = self.rev(node)
1633 rev = self.rev(node)
1661 if flags is None:
1634 if flags is None:
1662 flags = self.flags(rev)
1635 flags = self.flags(rev)
1663 # no extra flags set, no flag processor runs, text = rawtext
1636 # no extra flags set, no flag processor runs, text = rawtext
1664 if flags == REVIDX_DEFAULT_FLAGS:
1637 if flags == REVIDX_DEFAULT_FLAGS:
1665 return self._revisioncache[2]
1638 return self._revisioncache[2]
1666 # rawtext is reusable. need to run flag processor
1639 # rawtext is reusable. need to run flag processor
1667 rawtext = self._revisioncache[2]
1640 rawtext = self._revisioncache[2]
1668
1641
1669 cachedrev = self._revisioncache[1]
1642 cachedrev = self._revisioncache[1]
1670
1643
1671 # look up what we need to read
1644 # look up what we need to read
1672 if rawtext is None:
1645 if rawtext is None:
1673 if rev is None:
1646 if rev is None:
1674 rev = self.rev(node)
1647 rev = self.rev(node)
1675
1648
1676 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1649 chain, stopped = self._deltachain(rev, stoprev=cachedrev)
1677 if stopped:
1650 if stopped:
1678 rawtext = self._revisioncache[2]
1651 rawtext = self._revisioncache[2]
1679
1652
1680 # drop cache to save memory
1653 # drop cache to save memory
1681 self._revisioncache = None
1654 self._revisioncache = None
1682
1655
1683 targetsize = None
1656 targetsize = None
1684 rawsize = self.index[rev][2]
1657 rawsize = self.index[rev][2]
1685 if 0 <= rawsize:
1658 if 0 <= rawsize:
1686 targetsize = 4 * rawsize
1659 targetsize = 4 * rawsize
1687
1660
1688 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1661 bins = self._chunks(chain, df=_df, targetsize=targetsize)
1689 if rawtext is None:
1662 if rawtext is None:
1690 rawtext = bytes(bins[0])
1663 rawtext = bytes(bins[0])
1691 bins = bins[1:]
1664 bins = bins[1:]
1692
1665
1693 rawtext = mdiff.patches(rawtext, bins)
1666 rawtext = mdiff.patches(rawtext, bins)
1694 self._revisioncache = (node, rev, rawtext)
1667 self._revisioncache = (node, rev, rawtext)
1695
1668
1696 if flags is None:
1669 if flags is None:
1697 if rev is None:
1670 if rev is None:
1698 rev = self.rev(node)
1671 rev = self.rev(node)
1699 flags = self.flags(rev)
1672 flags = self.flags(rev)
1700
1673
1701 text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
1674 text, validatehash = self._processflags(rawtext, flags, 'read', raw=raw)
1702 if validatehash:
1675 if validatehash:
1703 self.checkhash(text, node, rev=rev)
1676 self.checkhash(text, node, rev=rev)
1704
1677
1705 return text
1678 return text
1706
1679
1707 def rawdata(self, nodeorrev, _df=None, raw=False):
1680 def rawdata(self, nodeorrev, _df=None, raw=False):
1708 """return an uncompressed raw data of a given node or revision number.
1681 """return an uncompressed raw data of a given node or revision number.
1709
1682
1710 _df - an existing file handle to read from. (internal-only)
1683 _df - an existing file handle to read from. (internal-only)
1711 """
1684 """
1712 return self._revisiondata(nodeorrev, _df, raw=True)
1685 return self._revisiondata(nodeorrev, _df, raw=True)
1713
1686
1714 def hash(self, text, p1, p2):
1687 def hash(self, text, p1, p2):
1715 """Compute a node hash.
1688 """Compute a node hash.
1716
1689
1717 Available as a function so that subclasses can replace the hash
1690 Available as a function so that subclasses can replace the hash
1718 as needed.
1691 as needed.
1719 """
1692 """
1720 return storageutil.hashrevisionsha1(text, p1, p2)
1693 return storageutil.hashrevisionsha1(text, p1, p2)
1721
1694
1722 def _processflags(self, text, flags, operation, raw=False):
1695 def _processflags(self, text, flags, operation, raw=False):
1723 """Inspect revision data flags and applies transforms defined by
1696 """Inspect revision data flags and applies transforms defined by
1724 registered flag processors.
1697 registered flag processors.
1725
1698
1726 ``text`` - the revision data to process
1699 ``text`` - the revision data to process
1727 ``flags`` - the revision flags
1700 ``flags`` - the revision flags
1728 ``operation`` - the operation being performed (read or write)
1701 ``operation`` - the operation being performed (read or write)
1729 ``raw`` - an optional argument describing if the raw transform should be
1702 ``raw`` - an optional argument describing if the raw transform should be
1730 applied.
1703 applied.
1731
1704
1732 This method processes the flags in the order (or reverse order if
1705 This method processes the flags in the order (or reverse order if
1733 ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
1706 ``operation`` is 'write') defined by REVIDX_FLAGS_ORDER, applying the
1734 flag processors registered for present flags. The order of flags defined
1707 flag processors registered for present flags. The order of flags defined
1735 in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.
1708 in REVIDX_FLAGS_ORDER needs to be stable to allow non-commutativity.
1736
1709
1737 Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
1710 Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
1738 processed text and ``validatehash`` is a bool indicating whether the
1711 processed text and ``validatehash`` is a bool indicating whether the
1739 returned text should be checked for hash integrity.
1712 returned text should be checked for hash integrity.
1740
1713
1741 Note: If the ``raw`` argument is set, it has precedence over the
1714 Note: If the ``raw`` argument is set, it has precedence over the
1742 operation and will only update the value of ``validatehash``.
1715 operation and will only update the value of ``validatehash``.
1743 """
1716 """
1744 # fast path: no flag processors will run
1717 # fast path: no flag processors will run
1745 if flags == 0:
1718 if flags == 0:
1746 return text, True
1719 return text, True
1747 if not operation in ('read', 'write'):
1720 if not operation in ('read', 'write'):
1748 raise error.ProgrammingError(_("invalid '%s' operation") %
1721 raise error.ProgrammingError(_("invalid '%s' operation") %
1749 operation)
1722 operation)
1750 # Check all flags are known.
1723 # Check all flags are known.
1751 if flags & ~flagutil.REVIDX_KNOWN_FLAGS:
1724 if flags & ~flagutil.REVIDX_KNOWN_FLAGS:
1752 raise error.RevlogError(_("incompatible revision flag '%#x'") %
1725 raise error.RevlogError(_("incompatible revision flag '%#x'") %
1753 (flags & ~flagutil.REVIDX_KNOWN_FLAGS))
1726 (flags & ~flagutil.REVIDX_KNOWN_FLAGS))
1754 validatehash = True
1727 validatehash = True
1755 # Depending on the operation (read or write), the order might be
1728 # Depending on the operation (read or write), the order might be
1756 # reversed due to non-commutative transforms.
1729 # reversed due to non-commutative transforms.
1757 orderedflags = REVIDX_FLAGS_ORDER
1730 orderedflags = REVIDX_FLAGS_ORDER
1758 if operation == 'write':
1731 if operation == 'write':
1759 orderedflags = reversed(orderedflags)
1732 orderedflags = reversed(orderedflags)
1760
1733
1761 for flag in orderedflags:
1734 for flag in orderedflags:
1762 # If a flagprocessor has been registered for a known flag, apply the
1735 # If a flagprocessor has been registered for a known flag, apply the
1763 # related operation transform and update result tuple.
1736 # related operation transform and update result tuple.
1764 if flag & flags:
1737 if flag & flags:
1765 vhash = True
1738 vhash = True
1766
1739
1767 if flag not in self._flagprocessors:
1740 if flag not in self._flagprocessors:
1768 message = _("missing processor for flag '%#x'") % (flag)
1741 message = _("missing processor for flag '%#x'") % (flag)
1769 raise error.RevlogError(message)
1742 raise error.RevlogError(message)
1770
1743
1771 processor = self._flagprocessors[flag]
1744 processor = self._flagprocessors[flag]
1772 if processor is not None:
1745 if processor is not None:
1773 readtransform, writetransform, rawtransform = processor
1746 readtransform, writetransform, rawtransform = processor
1774
1747
1775 if raw:
1748 if raw:
1776 vhash = rawtransform(self, text)
1749 vhash = rawtransform(self, text)
1777 elif operation == 'read':
1750 elif operation == 'read':
1778 text, vhash = readtransform(self, text)
1751 text, vhash = readtransform(self, text)
1779 else: # write operation
1752 else: # write operation
1780 text, vhash = writetransform(self, text)
1753 text, vhash = writetransform(self, text)
1781 validatehash = validatehash and vhash
1754 validatehash = validatehash and vhash
1782
1755
1783 return text, validatehash
1756 return text, validatehash
1784
1757
1785 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1758 def checkhash(self, text, node, p1=None, p2=None, rev=None):
1786 """Check node hash integrity.
1759 """Check node hash integrity.
1787
1760
1788 Available as a function so that subclasses can extend hash mismatch
1761 Available as a function so that subclasses can extend hash mismatch
1789 behaviors as needed.
1762 behaviors as needed.
1790 """
1763 """
1791 try:
1764 try:
1792 if p1 is None and p2 is None:
1765 if p1 is None and p2 is None:
1793 p1, p2 = self.parents(node)
1766 p1, p2 = self.parents(node)
1794 if node != self.hash(text, p1, p2):
1767 if node != self.hash(text, p1, p2):
1795 # Clear the revision cache on hash failure. The revision cache
1768 # Clear the revision cache on hash failure. The revision cache
1796 # only stores the raw revision and clearing the cache does have
1769 # only stores the raw revision and clearing the cache does have
1797 # the side-effect that we won't have a cache hit when the raw
1770 # the side-effect that we won't have a cache hit when the raw
1798 # revision data is accessed. But this case should be rare and
1771 # revision data is accessed. But this case should be rare and
1799 # it is extra work to teach the cache about the hash
1772 # it is extra work to teach the cache about the hash
1800 # verification state.
1773 # verification state.
1801 if self._revisioncache and self._revisioncache[0] == node:
1774 if self._revisioncache and self._revisioncache[0] == node:
1802 self._revisioncache = None
1775 self._revisioncache = None
1803
1776
1804 revornode = rev
1777 revornode = rev
1805 if revornode is None:
1778 if revornode is None:
1806 revornode = templatefilters.short(hex(node))
1779 revornode = templatefilters.short(hex(node))
1807 raise error.RevlogError(_("integrity check failed on %s:%s")
1780 raise error.RevlogError(_("integrity check failed on %s:%s")
1808 % (self.indexfile, pycompat.bytestr(revornode)))
1781 % (self.indexfile, pycompat.bytestr(revornode)))
1809 except error.RevlogError:
1782 except error.RevlogError:
1810 if self._censorable and storageutil.iscensoredtext(text):
1783 if self._censorable and storageutil.iscensoredtext(text):
1811 raise error.CensoredNodeError(self.indexfile, node, text)
1784 raise error.CensoredNodeError(self.indexfile, node, text)
1812 raise
1785 raise
1813
1786
1814 def _enforceinlinesize(self, tr, fp=None):
1787 def _enforceinlinesize(self, tr, fp=None):
1815 """Check if the revlog is too big for inline and convert if so.
1788 """Check if the revlog is too big for inline and convert if so.
1816
1789
1817 This should be called after revisions are added to the revlog. If the
1790 This should be called after revisions are added to the revlog. If the
1818 revlog has grown too large to be an inline revlog, it will convert it
1791 revlog has grown too large to be an inline revlog, it will convert it
1819 to use multiple index and data files.
1792 to use multiple index and data files.
1820 """
1793 """
1821 tiprev = len(self) - 1
1794 tiprev = len(self) - 1
1822 if (not self._inline or
1795 if (not self._inline or
1823 (self.start(tiprev) + self.length(tiprev)) < _maxinline):
1796 (self.start(tiprev) + self.length(tiprev)) < _maxinline):
1824 return
1797 return
1825
1798
1826 trinfo = tr.find(self.indexfile)
1799 trinfo = tr.find(self.indexfile)
1827 if trinfo is None:
1800 if trinfo is None:
1828 raise error.RevlogError(_("%s not found in the transaction")
1801 raise error.RevlogError(_("%s not found in the transaction")
1829 % self.indexfile)
1802 % self.indexfile)
1830
1803
1831 trindex = trinfo[2]
1804 trindex = trinfo[2]
1832 if trindex is not None:
1805 if trindex is not None:
1833 dataoff = self.start(trindex)
1806 dataoff = self.start(trindex)
1834 else:
1807 else:
1835 # revlog was stripped at start of transaction, use all leftover data
1808 # revlog was stripped at start of transaction, use all leftover data
1836 trindex = len(self) - 1
1809 trindex = len(self) - 1
1837 dataoff = self.end(tiprev)
1810 dataoff = self.end(tiprev)
1838
1811
1839 tr.add(self.datafile, dataoff)
1812 tr.add(self.datafile, dataoff)
1840
1813
1841 if fp:
1814 if fp:
1842 fp.flush()
1815 fp.flush()
1843 fp.close()
1816 fp.close()
1844 # We can't use the cached file handle after close(). So prevent
1817 # We can't use the cached file handle after close(). So prevent
1845 # its usage.
1818 # its usage.
1846 self._writinghandles = None
1819 self._writinghandles = None
1847
1820
1848 with self._indexfp('r') as ifh, self._datafp('w') as dfh:
1821 with self._indexfp('r') as ifh, self._datafp('w') as dfh:
1849 for r in self:
1822 for r in self:
1850 dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])
1823 dfh.write(self._getsegmentforrevs(r, r, df=ifh)[1])
1851
1824
1852 with self._indexfp('w') as fp:
1825 with self._indexfp('w') as fp:
1853 self.version &= ~FLAG_INLINE_DATA
1826 self.version &= ~FLAG_INLINE_DATA
1854 self._inline = False
1827 self._inline = False
1855 io = self._io
1828 io = self._io
1856 for i in self:
1829 for i in self:
1857 e = io.packentry(self.index[i], self.node, self.version, i)
1830 e = io.packentry(self.index[i], self.node, self.version, i)
1858 fp.write(e)
1831 fp.write(e)
1859
1832
1860 # the temp file replace the real index when we exit the context
1833 # the temp file replace the real index when we exit the context
1861 # manager
1834 # manager
1862
1835
1863 tr.replace(self.indexfile, trindex * self._io.size)
1836 tr.replace(self.indexfile, trindex * self._io.size)
1864 self._chunkclear()
1837 self._chunkclear()
1865
1838
1866 def _nodeduplicatecallback(self, transaction, node):
1839 def _nodeduplicatecallback(self, transaction, node):
1867 """called when trying to add a node already stored.
1840 """called when trying to add a node already stored.
1868 """
1841 """
1869
1842
1870 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
1843 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
1871 node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None):
1844 node=None, flags=REVIDX_DEFAULT_FLAGS, deltacomputer=None):
1872 """add a revision to the log
1845 """add a revision to the log
1873
1846
1874 text - the revision data to add
1847 text - the revision data to add
1875 transaction - the transaction object used for rollback
1848 transaction - the transaction object used for rollback
1876 link - the linkrev data to add
1849 link - the linkrev data to add
1877 p1, p2 - the parent nodeids of the revision
1850 p1, p2 - the parent nodeids of the revision
1878 cachedelta - an optional precomputed delta
1851 cachedelta - an optional precomputed delta
1879 node - nodeid of revision; typically node is not specified, and it is
1852 node - nodeid of revision; typically node is not specified, and it is
1880 computed by default as hash(text, p1, p2), however subclasses might
1853 computed by default as hash(text, p1, p2), however subclasses might
1881 use different hashing method (and override checkhash() in such case)
1854 use different hashing method (and override checkhash() in such case)
1882 flags - the known flags to set on the revision
1855 flags - the known flags to set on the revision
1883 deltacomputer - an optional deltacomputer instance shared between
1856 deltacomputer - an optional deltacomputer instance shared between
1884 multiple calls
1857 multiple calls
1885 """
1858 """
1886 if link == nullrev:
1859 if link == nullrev:
1887 raise error.RevlogError(_("attempted to add linkrev -1 to %s")
1860 raise error.RevlogError(_("attempted to add linkrev -1 to %s")
1888 % self.indexfile)
1861 % self.indexfile)
1889
1862
1890 if flags:
1863 if flags:
1891 node = node or self.hash(text, p1, p2)
1864 node = node or self.hash(text, p1, p2)
1892
1865
1893 rawtext, validatehash = self._processflags(text, flags, 'write')
1866 rawtext, validatehash = self._processflags(text, flags, 'write')
1894
1867
1895 # If the flag processor modifies the revision data, ignore any provided
1868 # If the flag processor modifies the revision data, ignore any provided
1896 # cachedelta.
1869 # cachedelta.
1897 if rawtext != text:
1870 if rawtext != text:
1898 cachedelta = None
1871 cachedelta = None
1899
1872
1900 if len(rawtext) > _maxentrysize:
1873 if len(rawtext) > _maxentrysize:
1901 raise error.RevlogError(
1874 raise error.RevlogError(
1902 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
1875 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
1903 % (self.indexfile, len(rawtext)))
1876 % (self.indexfile, len(rawtext)))
1904
1877
1905 node = node or self.hash(rawtext, p1, p2)
1878 node = node or self.hash(rawtext, p1, p2)
1906 if node in self.nodemap:
1879 if node in self.nodemap:
1907 return node
1880 return node
1908
1881
1909 if validatehash:
1882 if validatehash:
1910 self.checkhash(rawtext, node, p1=p1, p2=p2)
1883 self.checkhash(rawtext, node, p1=p1, p2=p2)
1911
1884
1912 return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
1885 return self.addrawrevision(rawtext, transaction, link, p1, p2, node,
1913 flags, cachedelta=cachedelta,
1886 flags, cachedelta=cachedelta,
1914 deltacomputer=deltacomputer)
1887 deltacomputer=deltacomputer)
1915
1888
1916 def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
1889 def addrawrevision(self, rawtext, transaction, link, p1, p2, node, flags,
1917 cachedelta=None, deltacomputer=None):
1890 cachedelta=None, deltacomputer=None):
1918 """add a raw revision with known flags, node and parents
1891 """add a raw revision with known flags, node and parents
1919 useful when reusing a revision not stored in this revlog (ex: received
1892 useful when reusing a revision not stored in this revlog (ex: received
1920 over wire, or read from an external bundle).
1893 over wire, or read from an external bundle).
1921 """
1894 """
1922 dfh = None
1895 dfh = None
1923 if not self._inline:
1896 if not self._inline:
1924 dfh = self._datafp("a+")
1897 dfh = self._datafp("a+")
1925 ifh = self._indexfp("a+")
1898 ifh = self._indexfp("a+")
1926 try:
1899 try:
1927 return self._addrevision(node, rawtext, transaction, link, p1, p2,
1900 return self._addrevision(node, rawtext, transaction, link, p1, p2,
1928 flags, cachedelta, ifh, dfh,
1901 flags, cachedelta, ifh, dfh,
1929 deltacomputer=deltacomputer)
1902 deltacomputer=deltacomputer)
1930 finally:
1903 finally:
1931 if dfh:
1904 if dfh:
1932 dfh.close()
1905 dfh.close()
1933 ifh.close()
1906 ifh.close()
1934
1907
1935 def compress(self, data):
1908 def compress(self, data):
1936 """Generate a possibly-compressed representation of data."""
1909 """Generate a possibly-compressed representation of data."""
1937 if not data:
1910 if not data:
1938 return '', data
1911 return '', data
1939
1912
1940 compressed = self._compressor.compress(data)
1913 compressed = self._compressor.compress(data)
1941
1914
1942 if compressed:
1915 if compressed:
1943 # The revlog compressor added the header in the returned data.
1916 # The revlog compressor added the header in the returned data.
1944 return '', compressed
1917 return '', compressed
1945
1918
1946 if data[0:1] == '\0':
1919 if data[0:1] == '\0':
1947 return '', data
1920 return '', data
1948 return 'u', data
1921 return 'u', data
1949
1922
1950 def decompress(self, data):
1923 def decompress(self, data):
1951 """Decompress a revlog chunk.
1924 """Decompress a revlog chunk.
1952
1925
1953 The chunk is expected to begin with a header identifying the
1926 The chunk is expected to begin with a header identifying the
1954 format type so it can be routed to an appropriate decompressor.
1927 format type so it can be routed to an appropriate decompressor.
1955 """
1928 """
1956 if not data:
1929 if not data:
1957 return data
1930 return data
1958
1931
1959 # Revlogs are read much more frequently than they are written and many
1932 # Revlogs are read much more frequently than they are written and many
1960 # chunks only take microseconds to decompress, so performance is
1933 # chunks only take microseconds to decompress, so performance is
1961 # important here.
1934 # important here.
1962 #
1935 #
1963 # We can make a few assumptions about revlogs:
1936 # We can make a few assumptions about revlogs:
1964 #
1937 #
1965 # 1) the majority of chunks will be compressed (as opposed to inline
1938 # 1) the majority of chunks will be compressed (as opposed to inline
1966 # raw data).
1939 # raw data).
1967 # 2) decompressing *any* data will likely by at least 10x slower than
1940 # 2) decompressing *any* data will likely by at least 10x slower than
1968 # returning raw inline data.
1941 # returning raw inline data.
1969 # 3) we want to prioritize common and officially supported compression
1942 # 3) we want to prioritize common and officially supported compression
1970 # engines
1943 # engines
1971 #
1944 #
1972 # It follows that we want to optimize for "decompress compressed data
1945 # It follows that we want to optimize for "decompress compressed data
1973 # when encoded with common and officially supported compression engines"
1946 # when encoded with common and officially supported compression engines"
1974 # case over "raw data" and "data encoded by less common or non-official
1947 # case over "raw data" and "data encoded by less common or non-official
1975 # compression engines." That is why we have the inline lookup first
1948 # compression engines." That is why we have the inline lookup first
1976 # followed by the compengines lookup.
1949 # followed by the compengines lookup.
1977 #
1950 #
1978 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
1951 # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
1979 # compressed chunks. And this matters for changelog and manifest reads.
1952 # compressed chunks. And this matters for changelog and manifest reads.
1980 t = data[0:1]
1953 t = data[0:1]
1981
1954
1982 if t == 'x':
1955 if t == 'x':
1983 try:
1956 try:
1984 return _zlibdecompress(data)
1957 return _zlibdecompress(data)
1985 except zlib.error as e:
1958 except zlib.error as e:
1986 raise error.RevlogError(_('revlog decompress error: %s') %
1959 raise error.RevlogError(_('revlog decompress error: %s') %
1987 stringutil.forcebytestr(e))
1960 stringutil.forcebytestr(e))
1988 # '\0' is more common than 'u' so it goes first.
1961 # '\0' is more common than 'u' so it goes first.
1989 elif t == '\0':
1962 elif t == '\0':
1990 return data
1963 return data
1991 elif t == 'u':
1964 elif t == 'u':
1992 return util.buffer(data, 1)
1965 return util.buffer(data, 1)
1993
1966
1994 try:
1967 try:
1995 compressor = self._decompressors[t]
1968 compressor = self._decompressors[t]
1996 except KeyError:
1969 except KeyError:
1997 try:
1970 try:
1998 engine = util.compengines.forrevlogheader(t)
1971 engine = util.compengines.forrevlogheader(t)
1999 compressor = engine.revlogcompressor(self._compengineopts)
1972 compressor = engine.revlogcompressor(self._compengineopts)
2000 self._decompressors[t] = compressor
1973 self._decompressors[t] = compressor
2001 except KeyError:
1974 except KeyError:
2002 raise error.RevlogError(_('unknown compression type %r') % t)
1975 raise error.RevlogError(_('unknown compression type %r') % t)
2003
1976
2004 return compressor.decompress(data)
1977 return compressor.decompress(data)
2005
1978
2006 def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
1979 def _addrevision(self, node, rawtext, transaction, link, p1, p2, flags,
2007 cachedelta, ifh, dfh, alwayscache=False,
1980 cachedelta, ifh, dfh, alwayscache=False,
2008 deltacomputer=None):
1981 deltacomputer=None):
2009 """internal function to add revisions to the log
1982 """internal function to add revisions to the log
2010
1983
2011 see addrevision for argument descriptions.
1984 see addrevision for argument descriptions.
2012
1985
2013 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
1986 note: "addrevision" takes non-raw text, "_addrevision" takes raw text.
2014
1987
2015 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
1988 if "deltacomputer" is not provided or None, a defaultdeltacomputer will
2016 be used.
1989 be used.
2017
1990
2018 invariants:
1991 invariants:
2019 - rawtext is optional (can be None); if not set, cachedelta must be set.
1992 - rawtext is optional (can be None); if not set, cachedelta must be set.
2020 if both are set, they must correspond to each other.
1993 if both are set, they must correspond to each other.
2021 """
1994 """
2022 if node == nullid:
1995 if node == nullid:
2023 raise error.RevlogError(_("%s: attempt to add null revision") %
1996 raise error.RevlogError(_("%s: attempt to add null revision") %
2024 self.indexfile)
1997 self.indexfile)
2025 if node == wdirid or node in wdirfilenodeids:
1998 if node == wdirid or node in wdirfilenodeids:
2026 raise error.RevlogError(_("%s: attempt to add wdir revision") %
1999 raise error.RevlogError(_("%s: attempt to add wdir revision") %
2027 self.indexfile)
2000 self.indexfile)
2028
2001
2029 if self._inline:
2002 if self._inline:
2030 fh = ifh
2003 fh = ifh
2031 else:
2004 else:
2032 fh = dfh
2005 fh = dfh
2033
2006
2034 btext = [rawtext]
2007 btext = [rawtext]
2035
2008
2036 curr = len(self)
2009 curr = len(self)
2037 prev = curr - 1
2010 prev = curr - 1
2038 offset = self.end(prev)
2011 offset = self.end(prev)
2039 p1r, p2r = self.rev(p1), self.rev(p2)
2012 p1r, p2r = self.rev(p1), self.rev(p2)
2040
2013
2041 # full versions are inserted when the needed deltas
2014 # full versions are inserted when the needed deltas
2042 # become comparable to the uncompressed text
2015 # become comparable to the uncompressed text
2043 if rawtext is None:
2016 if rawtext is None:
2044 # need rawtext size, before changed by flag processors, which is
2017 # need rawtext size, before changed by flag processors, which is
2045 # the non-raw size. use revlog explicitly to avoid filelog's extra
2018 # the non-raw size. use revlog explicitly to avoid filelog's extra
2046 # logic that might remove metadata size.
2019 # logic that might remove metadata size.
2047 textlen = mdiff.patchedsize(revlog.size(self, cachedelta[0]),
2020 textlen = mdiff.patchedsize(revlog.size(self, cachedelta[0]),
2048 cachedelta[1])
2021 cachedelta[1])
2049 else:
2022 else:
2050 textlen = len(rawtext)
2023 textlen = len(rawtext)
2051
2024
2052 if deltacomputer is None:
2025 if deltacomputer is None:
2053 deltacomputer = deltautil.deltacomputer(self)
2026 deltacomputer = deltautil.deltacomputer(self)
2054
2027
2055 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2028 revinfo = _revisioninfo(node, p1, p2, btext, textlen, cachedelta, flags)
2056
2029
2057 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2030 deltainfo = deltacomputer.finddeltainfo(revinfo, fh)
2058
2031
2059 e = (offset_type(offset, flags), deltainfo.deltalen, textlen,
2032 e = (offset_type(offset, flags), deltainfo.deltalen, textlen,
2060 deltainfo.base, link, p1r, p2r, node)
2033 deltainfo.base, link, p1r, p2r, node)
2061 self.index.append(e)
2034 self.index.append(e)
2062 self.nodemap[node] = curr
2035 self.nodemap[node] = curr
2063
2036
2064 # Reset the pure node cache start lookup offset to account for new
2037 # Reset the pure node cache start lookup offset to account for new
2065 # revision.
2038 # revision.
2066 if self._nodepos is not None:
2039 if self._nodepos is not None:
2067 self._nodepos = curr
2040 self._nodepos = curr
2068
2041
2069 entry = self._io.packentry(e, self.node, self.version, curr)
2042 entry = self._io.packentry(e, self.node, self.version, curr)
2070 self._writeentry(transaction, ifh, dfh, entry, deltainfo.data,
2043 self._writeentry(transaction, ifh, dfh, entry, deltainfo.data,
2071 link, offset)
2044 link, offset)
2072
2045
2073 rawtext = btext[0]
2046 rawtext = btext[0]
2074
2047
2075 if alwayscache and rawtext is None:
2048 if alwayscache and rawtext is None:
2076 rawtext = deltacomputer.buildtext(revinfo, fh)
2049 rawtext = deltacomputer.buildtext(revinfo, fh)
2077
2050
2078 if type(rawtext) == bytes: # only accept immutable objects
2051 if type(rawtext) == bytes: # only accept immutable objects
2079 self._revisioncache = (node, curr, rawtext)
2052 self._revisioncache = (node, curr, rawtext)
2080 self._chainbasecache[curr] = deltainfo.chainbase
2053 self._chainbasecache[curr] = deltainfo.chainbase
2081 return node
2054 return node
2082
2055
2083 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
2056 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
2084 # Files opened in a+ mode have inconsistent behavior on various
2057 # Files opened in a+ mode have inconsistent behavior on various
2085 # platforms. Windows requires that a file positioning call be made
2058 # platforms. Windows requires that a file positioning call be made
2086 # when the file handle transitions between reads and writes. See
2059 # when the file handle transitions between reads and writes. See
2087 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2060 # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
2088 # platforms, Python or the platform itself can be buggy. Some versions
2061 # platforms, Python or the platform itself can be buggy. Some versions
2089 # of Solaris have been observed to not append at the end of the file
2062 # of Solaris have been observed to not append at the end of the file
2090 # if the file was seeked to before the end. See issue4943 for more.
2063 # if the file was seeked to before the end. See issue4943 for more.
2091 #
2064 #
2092 # We work around this issue by inserting a seek() before writing.
2065 # We work around this issue by inserting a seek() before writing.
2093 # Note: This is likely not necessary on Python 3. However, because
2066 # Note: This is likely not necessary on Python 3. However, because
2094 # the file handle is reused for reads and may be seeked there, we need
2067 # the file handle is reused for reads and may be seeked there, we need
2095 # to be careful before changing this.
2068 # to be careful before changing this.
2096 ifh.seek(0, os.SEEK_END)
2069 ifh.seek(0, os.SEEK_END)
2097 if dfh:
2070 if dfh:
2098 dfh.seek(0, os.SEEK_END)
2071 dfh.seek(0, os.SEEK_END)
2099
2072
2100 curr = len(self) - 1
2073 curr = len(self) - 1
2101 if not self._inline:
2074 if not self._inline:
2102 transaction.add(self.datafile, offset)
2075 transaction.add(self.datafile, offset)
2103 transaction.add(self.indexfile, curr * len(entry))
2076 transaction.add(self.indexfile, curr * len(entry))
2104 if data[0]:
2077 if data[0]:
2105 dfh.write(data[0])
2078 dfh.write(data[0])
2106 dfh.write(data[1])
2079 dfh.write(data[1])
2107 ifh.write(entry)
2080 ifh.write(entry)
2108 else:
2081 else:
2109 offset += curr * self._io.size
2082 offset += curr * self._io.size
2110 transaction.add(self.indexfile, offset, curr)
2083 transaction.add(self.indexfile, offset, curr)
2111 ifh.write(entry)
2084 ifh.write(entry)
2112 ifh.write(data[0])
2085 ifh.write(data[0])
2113 ifh.write(data[1])
2086 ifh.write(data[1])
2114 self._enforceinlinesize(transaction, ifh)
2087 self._enforceinlinesize(transaction, ifh)
2115
2088
2116 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
2089 def addgroup(self, deltas, linkmapper, transaction, addrevisioncb=None):
2117 """
2090 """
2118 add a delta group
2091 add a delta group
2119
2092
2120 given a set of deltas, add them to the revision log. the
2093 given a set of deltas, add them to the revision log. the
2121 first delta is against its parent, which should be in our
2094 first delta is against its parent, which should be in our
2122 log, the rest are against the previous delta.
2095 log, the rest are against the previous delta.
2123
2096
2124 If ``addrevisioncb`` is defined, it will be called with arguments of
2097 If ``addrevisioncb`` is defined, it will be called with arguments of
2125 this revlog and the node that was added.
2098 this revlog and the node that was added.
2126 """
2099 """
2127
2100
2128 if self._writinghandles:
2101 if self._writinghandles:
2129 raise error.ProgrammingError('cannot nest addgroup() calls')
2102 raise error.ProgrammingError('cannot nest addgroup() calls')
2130
2103
2131 nodes = []
2104 nodes = []
2132
2105
2133 r = len(self)
2106 r = len(self)
2134 end = 0
2107 end = 0
2135 if r:
2108 if r:
2136 end = self.end(r - 1)
2109 end = self.end(r - 1)
2137 ifh = self._indexfp("a+")
2110 ifh = self._indexfp("a+")
2138 isize = r * self._io.size
2111 isize = r * self._io.size
2139 if self._inline:
2112 if self._inline:
2140 transaction.add(self.indexfile, end + isize, r)
2113 transaction.add(self.indexfile, end + isize, r)
2141 dfh = None
2114 dfh = None
2142 else:
2115 else:
2143 transaction.add(self.indexfile, isize, r)
2116 transaction.add(self.indexfile, isize, r)
2144 transaction.add(self.datafile, end)
2117 transaction.add(self.datafile, end)
2145 dfh = self._datafp("a+")
2118 dfh = self._datafp("a+")
2146 def flush():
2119 def flush():
2147 if dfh:
2120 if dfh:
2148 dfh.flush()
2121 dfh.flush()
2149 ifh.flush()
2122 ifh.flush()
2150
2123
2151 self._writinghandles = (ifh, dfh)
2124 self._writinghandles = (ifh, dfh)
2152
2125
2153 try:
2126 try:
2154 deltacomputer = deltautil.deltacomputer(self)
2127 deltacomputer = deltautil.deltacomputer(self)
2155 # loop through our set of deltas
2128 # loop through our set of deltas
2156 for data in deltas:
2129 for data in deltas:
2157 node, p1, p2, linknode, deltabase, delta, flags = data
2130 node, p1, p2, linknode, deltabase, delta, flags = data
2158 link = linkmapper(linknode)
2131 link = linkmapper(linknode)
2159 flags = flags or REVIDX_DEFAULT_FLAGS
2132 flags = flags or REVIDX_DEFAULT_FLAGS
2160
2133
2161 nodes.append(node)
2134 nodes.append(node)
2162
2135
2163 if node in self.nodemap:
2136 if node in self.nodemap:
2164 self._nodeduplicatecallback(transaction, node)
2137 self._nodeduplicatecallback(transaction, node)
2165 # this can happen if two branches make the same change
2138 # this can happen if two branches make the same change
2166 continue
2139 continue
2167
2140
2168 for p in (p1, p2):
2141 for p in (p1, p2):
2169 if p not in self.nodemap:
2142 if p not in self.nodemap:
2170 raise error.LookupError(p, self.indexfile,
2143 raise error.LookupError(p, self.indexfile,
2171 _('unknown parent'))
2144 _('unknown parent'))
2172
2145
2173 if deltabase not in self.nodemap:
2146 if deltabase not in self.nodemap:
2174 raise error.LookupError(deltabase, self.indexfile,
2147 raise error.LookupError(deltabase, self.indexfile,
2175 _('unknown delta base'))
2148 _('unknown delta base'))
2176
2149
2177 baserev = self.rev(deltabase)
2150 baserev = self.rev(deltabase)
2178
2151
2179 if baserev != nullrev and self.iscensored(baserev):
2152 if baserev != nullrev and self.iscensored(baserev):
2180 # if base is censored, delta must be full replacement in a
2153 # if base is censored, delta must be full replacement in a
2181 # single patch operation
2154 # single patch operation
2182 hlen = struct.calcsize(">lll")
2155 hlen = struct.calcsize(">lll")
2183 oldlen = self.rawsize(baserev)
2156 oldlen = self.rawsize(baserev)
2184 newlen = len(delta) - hlen
2157 newlen = len(delta) - hlen
2185 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2158 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
2186 raise error.CensoredBaseError(self.indexfile,
2159 raise error.CensoredBaseError(self.indexfile,
2187 self.node(baserev))
2160 self.node(baserev))
2188
2161
2189 if not flags and self._peek_iscensored(baserev, delta, flush):
2162 if not flags and self._peek_iscensored(baserev, delta, flush):
2190 flags |= REVIDX_ISCENSORED
2163 flags |= REVIDX_ISCENSORED
2191
2164
2192 # We assume consumers of addrevisioncb will want to retrieve
2165 # We assume consumers of addrevisioncb will want to retrieve
2193 # the added revision, which will require a call to
2166 # the added revision, which will require a call to
2194 # revision(). revision() will fast path if there is a cache
2167 # revision(). revision() will fast path if there is a cache
2195 # hit. So, we tell _addrevision() to always cache in this case.
2168 # hit. So, we tell _addrevision() to always cache in this case.
2196 # We're only using addgroup() in the context of changegroup
2169 # We're only using addgroup() in the context of changegroup
2197 # generation so the revision data can always be handled as raw
2170 # generation so the revision data can always be handled as raw
2198 # by the flagprocessor.
2171 # by the flagprocessor.
2199 self._addrevision(node, None, transaction, link,
2172 self._addrevision(node, None, transaction, link,
2200 p1, p2, flags, (baserev, delta),
2173 p1, p2, flags, (baserev, delta),
2201 ifh, dfh,
2174 ifh, dfh,
2202 alwayscache=bool(addrevisioncb),
2175 alwayscache=bool(addrevisioncb),
2203 deltacomputer=deltacomputer)
2176 deltacomputer=deltacomputer)
2204
2177
2205 if addrevisioncb:
2178 if addrevisioncb:
2206 addrevisioncb(self, node)
2179 addrevisioncb(self, node)
2207
2180
2208 if not dfh and not self._inline:
2181 if not dfh and not self._inline:
2209 # addrevision switched from inline to conventional
2182 # addrevision switched from inline to conventional
2210 # reopen the index
2183 # reopen the index
2211 ifh.close()
2184 ifh.close()
2212 dfh = self._datafp("a+")
2185 dfh = self._datafp("a+")
2213 ifh = self._indexfp("a+")
2186 ifh = self._indexfp("a+")
2214 self._writinghandles = (ifh, dfh)
2187 self._writinghandles = (ifh, dfh)
2215 finally:
2188 finally:
2216 self._writinghandles = None
2189 self._writinghandles = None
2217
2190
2218 if dfh:
2191 if dfh:
2219 dfh.close()
2192 dfh.close()
2220 ifh.close()
2193 ifh.close()
2221
2194
2222 return nodes
2195 return nodes
2223
2196
2224 def iscensored(self, rev):
2197 def iscensored(self, rev):
2225 """Check if a file revision is censored."""
2198 """Check if a file revision is censored."""
2226 if not self._censorable:
2199 if not self._censorable:
2227 return False
2200 return False
2228
2201
2229 return self.flags(rev) & REVIDX_ISCENSORED
2202 return self.flags(rev) & REVIDX_ISCENSORED
2230
2203
2231 def _peek_iscensored(self, baserev, delta, flush):
2204 def _peek_iscensored(self, baserev, delta, flush):
2232 """Quickly check if a delta produces a censored revision."""
2205 """Quickly check if a delta produces a censored revision."""
2233 if not self._censorable:
2206 if not self._censorable:
2234 return False
2207 return False
2235
2208
2236 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2209 return storageutil.deltaiscensored(delta, baserev, self.rawsize)
2237
2210
2238 def getstrippoint(self, minlink):
2211 def getstrippoint(self, minlink):
2239 """find the minimum rev that must be stripped to strip the linkrev
2212 """find the minimum rev that must be stripped to strip the linkrev
2240
2213
2241 Returns a tuple containing the minimum rev and a set of all revs that
2214 Returns a tuple containing the minimum rev and a set of all revs that
2242 have linkrevs that will be broken by this strip.
2215 have linkrevs that will be broken by this strip.
2243 """
2216 """
2244 return storageutil.resolvestripinfo(minlink, len(self) - 1,
2217 return storageutil.resolvestripinfo(minlink, len(self) - 1,
2245 self.headrevs(),
2218 self.headrevs(),
2246 self.linkrev, self.parentrevs)
2219 self.linkrev, self.parentrevs)
2247
2220
2248 def strip(self, minlink, transaction):
2221 def strip(self, minlink, transaction):
2249 """truncate the revlog on the first revision with a linkrev >= minlink
2222 """truncate the revlog on the first revision with a linkrev >= minlink
2250
2223
2251 This function is called when we're stripping revision minlink and
2224 This function is called when we're stripping revision minlink and
2252 its descendants from the repository.
2225 its descendants from the repository.
2253
2226
2254 We have to remove all revisions with linkrev >= minlink, because
2227 We have to remove all revisions with linkrev >= minlink, because
2255 the equivalent changelog revisions will be renumbered after the
2228 the equivalent changelog revisions will be renumbered after the
2256 strip.
2229 strip.
2257
2230
2258 So we truncate the revlog on the first of these revisions, and
2231 So we truncate the revlog on the first of these revisions, and
2259 trust that the caller has saved the revisions that shouldn't be
2232 trust that the caller has saved the revisions that shouldn't be
2260 removed and that it'll re-add them after this truncation.
2233 removed and that it'll re-add them after this truncation.
2261 """
2234 """
2262 if len(self) == 0:
2235 if len(self) == 0:
2263 return
2236 return
2264
2237
2265 rev, _ = self.getstrippoint(minlink)
2238 rev, _ = self.getstrippoint(minlink)
2266 if rev == len(self):
2239 if rev == len(self):
2267 return
2240 return
2268
2241
2269 # first truncate the files on disk
2242 # first truncate the files on disk
2270 end = self.start(rev)
2243 end = self.start(rev)
2271 if not self._inline:
2244 if not self._inline:
2272 transaction.add(self.datafile, end)
2245 transaction.add(self.datafile, end)
2273 end = rev * self._io.size
2246 end = rev * self._io.size
2274 else:
2247 else:
2275 end += rev * self._io.size
2248 end += rev * self._io.size
2276
2249
2277 transaction.add(self.indexfile, end)
2250 transaction.add(self.indexfile, end)
2278
2251
2279 # then reset internal state in memory to forget those revisions
2252 # then reset internal state in memory to forget those revisions
2280 self._revisioncache = None
2253 self._revisioncache = None
2281 self._chaininfocache = {}
2254 self._chaininfocache = {}
2282 self._chunkclear()
2255 self._chunkclear()
2283 for x in pycompat.xrange(rev, len(self)):
2256 for x in pycompat.xrange(rev, len(self)):
2284 del self.nodemap[self.node(x)]
2257 del self.nodemap[self.node(x)]
2285
2258
2286 del self.index[rev:-1]
2259 del self.index[rev:-1]
2287 self._nodepos = None
2260 self._nodepos = None
2288
2261
2289 def checksize(self):
2262 def checksize(self):
2290 """Check size of index and data files
2263 """Check size of index and data files
2291
2264
2292 return a (dd, di) tuple.
2265 return a (dd, di) tuple.
2293 - dd: extra bytes for the "data" file
2266 - dd: extra bytes for the "data" file
2294 - di: extra bytes for the "index" file
2267 - di: extra bytes for the "index" file
2295
2268
2296 A healthy revlog will return (0, 0).
2269 A healthy revlog will return (0, 0).
2297 """
2270 """
2298 expected = 0
2271 expected = 0
2299 if len(self):
2272 if len(self):
2300 expected = max(0, self.end(len(self) - 1))
2273 expected = max(0, self.end(len(self) - 1))
2301
2274
2302 try:
2275 try:
2303 with self._datafp() as f:
2276 with self._datafp() as f:
2304 f.seek(0, io.SEEK_END)
2277 f.seek(0, io.SEEK_END)
2305 actual = f.tell()
2278 actual = f.tell()
2306 dd = actual - expected
2279 dd = actual - expected
2307 except IOError as inst:
2280 except IOError as inst:
2308 if inst.errno != errno.ENOENT:
2281 if inst.errno != errno.ENOENT:
2309 raise
2282 raise
2310 dd = 0
2283 dd = 0
2311
2284
2312 try:
2285 try:
2313 f = self.opener(self.indexfile)
2286 f = self.opener(self.indexfile)
2314 f.seek(0, io.SEEK_END)
2287 f.seek(0, io.SEEK_END)
2315 actual = f.tell()
2288 actual = f.tell()
2316 f.close()
2289 f.close()
2317 s = self._io.size
2290 s = self._io.size
2318 i = max(0, actual // s)
2291 i = max(0, actual // s)
2319 di = actual - (i * s)
2292 di = actual - (i * s)
2320 if self._inline:
2293 if self._inline:
2321 databytes = 0
2294 databytes = 0
2322 for r in self:
2295 for r in self:
2323 databytes += max(0, self.length(r))
2296 databytes += max(0, self.length(r))
2324 dd = 0
2297 dd = 0
2325 di = actual - len(self) * s - databytes
2298 di = actual - len(self) * s - databytes
2326 except IOError as inst:
2299 except IOError as inst:
2327 if inst.errno != errno.ENOENT:
2300 if inst.errno != errno.ENOENT:
2328 raise
2301 raise
2329 di = 0
2302 di = 0
2330
2303
2331 return (dd, di)
2304 return (dd, di)
2332
2305
2333 def files(self):
2306 def files(self):
2334 res = [self.indexfile]
2307 res = [self.indexfile]
2335 if not self._inline:
2308 if not self._inline:
2336 res.append(self.datafile)
2309 res.append(self.datafile)
2337 return res
2310 return res
2338
2311
2339 def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
2312 def emitrevisions(self, nodes, nodesorder=None, revisiondata=False,
2340 assumehaveparentrevisions=False,
2313 assumehaveparentrevisions=False,
2341 deltamode=repository.CG_DELTAMODE_STD):
2314 deltamode=repository.CG_DELTAMODE_STD):
2342 if nodesorder not in ('nodes', 'storage', 'linear', None):
2315 if nodesorder not in ('nodes', 'storage', 'linear', None):
2343 raise error.ProgrammingError('unhandled value for nodesorder: %s' %
2316 raise error.ProgrammingError('unhandled value for nodesorder: %s' %
2344 nodesorder)
2317 nodesorder)
2345
2318
2346 if nodesorder is None and not self._generaldelta:
2319 if nodesorder is None and not self._generaldelta:
2347 nodesorder = 'storage'
2320 nodesorder = 'storage'
2348
2321
2349 if (not self._storedeltachains and
2322 if (not self._storedeltachains and
2350 deltamode != repository.CG_DELTAMODE_PREV):
2323 deltamode != repository.CG_DELTAMODE_PREV):
2351 deltamode = repository.CG_DELTAMODE_FULL
2324 deltamode = repository.CG_DELTAMODE_FULL
2352
2325
2353 return storageutil.emitrevisions(
2326 return storageutil.emitrevisions(
2354 self, nodes, nodesorder, revlogrevisiondelta,
2327 self, nodes, nodesorder, revlogrevisiondelta,
2355 deltaparentfn=self.deltaparent,
2328 deltaparentfn=self.deltaparent,
2356 candeltafn=self.candelta,
2329 candeltafn=self.candelta,
2357 rawsizefn=self.rawsize,
2330 rawsizefn=self.rawsize,
2358 revdifffn=self.revdiff,
2331 revdifffn=self.revdiff,
2359 flagsfn=self.flags,
2332 flagsfn=self.flags,
2360 deltamode=deltamode,
2333 deltamode=deltamode,
2361 revisiondata=revisiondata,
2334 revisiondata=revisiondata,
2362 assumehaveparentrevisions=assumehaveparentrevisions)
2335 assumehaveparentrevisions=assumehaveparentrevisions)
2363
2336
2364 DELTAREUSEALWAYS = 'always'
2337 DELTAREUSEALWAYS = 'always'
2365 DELTAREUSESAMEREVS = 'samerevs'
2338 DELTAREUSESAMEREVS = 'samerevs'
2366 DELTAREUSENEVER = 'never'
2339 DELTAREUSENEVER = 'never'
2367
2340
2368 DELTAREUSEFULLADD = 'fulladd'
2341 DELTAREUSEFULLADD = 'fulladd'
2369
2342
2370 DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'}
2343 DELTAREUSEALL = {'always', 'samerevs', 'never', 'fulladd'}
2371
2344
2372 def clone(self, tr, destrevlog, addrevisioncb=None,
2345 def clone(self, tr, destrevlog, addrevisioncb=None,
2373 deltareuse=DELTAREUSESAMEREVS, forcedeltabothparents=None):
2346 deltareuse=DELTAREUSESAMEREVS, forcedeltabothparents=None):
2374 """Copy this revlog to another, possibly with format changes.
2347 """Copy this revlog to another, possibly with format changes.
2375
2348
2376 The destination revlog will contain the same revisions and nodes.
2349 The destination revlog will contain the same revisions and nodes.
2377 However, it may not be bit-for-bit identical due to e.g. delta encoding
2350 However, it may not be bit-for-bit identical due to e.g. delta encoding
2378 differences.
2351 differences.
2379
2352
2380 The ``deltareuse`` argument control how deltas from the existing revlog
2353 The ``deltareuse`` argument control how deltas from the existing revlog
2381 are preserved in the destination revlog. The argument can have the
2354 are preserved in the destination revlog. The argument can have the
2382 following values:
2355 following values:
2383
2356
2384 DELTAREUSEALWAYS
2357 DELTAREUSEALWAYS
2385 Deltas will always be reused (if possible), even if the destination
2358 Deltas will always be reused (if possible), even if the destination
2386 revlog would not select the same revisions for the delta. This is the
2359 revlog would not select the same revisions for the delta. This is the
2387 fastest mode of operation.
2360 fastest mode of operation.
2388 DELTAREUSESAMEREVS
2361 DELTAREUSESAMEREVS
2389 Deltas will be reused if the destination revlog would pick the same
2362 Deltas will be reused if the destination revlog would pick the same
2390 revisions for the delta. This mode strikes a balance between speed
2363 revisions for the delta. This mode strikes a balance between speed
2391 and optimization.
2364 and optimization.
2392 DELTAREUSENEVER
2365 DELTAREUSENEVER
2393 Deltas will never be reused. This is the slowest mode of execution.
2366 Deltas will never be reused. This is the slowest mode of execution.
2394 This mode can be used to recompute deltas (e.g. if the diff/delta
2367 This mode can be used to recompute deltas (e.g. if the diff/delta
2395 algorithm changes).
2368 algorithm changes).
2396
2369
2397 Delta computation can be slow, so the choice of delta reuse policy can
2370 Delta computation can be slow, so the choice of delta reuse policy can
2398 significantly affect run time.
2371 significantly affect run time.
2399
2372
2400 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2373 The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
2401 two extremes. Deltas will be reused if they are appropriate. But if the
2374 two extremes. Deltas will be reused if they are appropriate. But if the
2402 delta could choose a better revision, it will do so. This means if you
2375 delta could choose a better revision, it will do so. This means if you
2403 are converting a non-generaldelta revlog to a generaldelta revlog,
2376 are converting a non-generaldelta revlog to a generaldelta revlog,
2404 deltas will be recomputed if the delta's parent isn't a parent of the
2377 deltas will be recomputed if the delta's parent isn't a parent of the
2405 revision.
2378 revision.
2406
2379
2407 In addition to the delta policy, the ``forcedeltabothparents``
2380 In addition to the delta policy, the ``forcedeltabothparents``
2408 argument controls whether to force compute deltas against both parents
2381 argument controls whether to force compute deltas against both parents
2409 for merges. By default, the current default is used.
2382 for merges. By default, the current default is used.
2410 """
2383 """
2411 if deltareuse not in self.DELTAREUSEALL:
2384 if deltareuse not in self.DELTAREUSEALL:
2412 raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)
2385 raise ValueError(_('value for deltareuse invalid: %s') % deltareuse)
2413
2386
2414 if len(destrevlog):
2387 if len(destrevlog):
2415 raise ValueError(_('destination revlog is not empty'))
2388 raise ValueError(_('destination revlog is not empty'))
2416
2389
2417 if getattr(self, 'filteredrevs', None):
2390 if getattr(self, 'filteredrevs', None):
2418 raise ValueError(_('source revlog has filtered revisions'))
2391 raise ValueError(_('source revlog has filtered revisions'))
2419 if getattr(destrevlog, 'filteredrevs', None):
2392 if getattr(destrevlog, 'filteredrevs', None):
2420 raise ValueError(_('destination revlog has filtered revisions'))
2393 raise ValueError(_('destination revlog has filtered revisions'))
2421
2394
2422 # lazydelta and lazydeltabase controls whether to reuse a cached delta,
2395 # lazydelta and lazydeltabase controls whether to reuse a cached delta,
2423 # if possible.
2396 # if possible.
2424 oldlazydelta = destrevlog._lazydelta
2397 oldlazydelta = destrevlog._lazydelta
2425 oldlazydeltabase = destrevlog._lazydeltabase
2398 oldlazydeltabase = destrevlog._lazydeltabase
2426 oldamd = destrevlog._deltabothparents
2399 oldamd = destrevlog._deltabothparents
2427
2400
2428 try:
2401 try:
2429 if deltareuse == self.DELTAREUSEALWAYS:
2402 if deltareuse == self.DELTAREUSEALWAYS:
2430 destrevlog._lazydeltabase = True
2403 destrevlog._lazydeltabase = True
2431 destrevlog._lazydelta = True
2404 destrevlog._lazydelta = True
2432 elif deltareuse == self.DELTAREUSESAMEREVS:
2405 elif deltareuse == self.DELTAREUSESAMEREVS:
2433 destrevlog._lazydeltabase = False
2406 destrevlog._lazydeltabase = False
2434 destrevlog._lazydelta = True
2407 destrevlog._lazydelta = True
2435 elif deltareuse == self.DELTAREUSENEVER:
2408 elif deltareuse == self.DELTAREUSENEVER:
2436 destrevlog._lazydeltabase = False
2409 destrevlog._lazydeltabase = False
2437 destrevlog._lazydelta = False
2410 destrevlog._lazydelta = False
2438
2411
2439 destrevlog._deltabothparents = forcedeltabothparents or oldamd
2412 destrevlog._deltabothparents = forcedeltabothparents or oldamd
2440
2413
2441 deltacomputer = deltautil.deltacomputer(destrevlog)
2414 deltacomputer = deltautil.deltacomputer(destrevlog)
2442 index = self.index
2415 index = self.index
2443 for rev in self:
2416 for rev in self:
2444 entry = index[rev]
2417 entry = index[rev]
2445
2418
2446 # Some classes override linkrev to take filtered revs into
2419 # Some classes override linkrev to take filtered revs into
2447 # account. Use raw entry from index.
2420 # account. Use raw entry from index.
2448 flags = entry[0] & 0xffff
2421 flags = entry[0] & 0xffff
2449 linkrev = entry[4]
2422 linkrev = entry[4]
2450 p1 = index[entry[5]][7]
2423 p1 = index[entry[5]][7]
2451 p2 = index[entry[6]][7]
2424 p2 = index[entry[6]][7]
2452 node = entry[7]
2425 node = entry[7]
2453
2426
2454 # (Possibly) reuse the delta from the revlog if allowed and
2427 # (Possibly) reuse the delta from the revlog if allowed and
2455 # the revlog chunk is a delta.
2428 # the revlog chunk is a delta.
2456 cachedelta = None
2429 cachedelta = None
2457 rawtext = None
2430 rawtext = None
2458 if (deltareuse != self.DELTAREUSEFULLADD
2431 if (deltareuse != self.DELTAREUSEFULLADD
2459 and destrevlog._lazydelta):
2432 and destrevlog._lazydelta):
2460 dp = self.deltaparent(rev)
2433 dp = self.deltaparent(rev)
2461 if dp != nullrev:
2434 if dp != nullrev:
2462 cachedelta = (dp, bytes(self._chunk(rev)))
2435 cachedelta = (dp, bytes(self._chunk(rev)))
2463
2436
2464 if not cachedelta:
2437 if not cachedelta:
2465 rawtext = self.revision(rev, raw=True)
2438 rawtext = self.revision(rev, raw=True)
2466
2439
2467
2440
2468 if deltareuse == self.DELTAREUSEFULLADD:
2441 if deltareuse == self.DELTAREUSEFULLADD:
2469 destrevlog.addrevision(rawtext, tr, linkrev, p1, p2,
2442 destrevlog.addrevision(rawtext, tr, linkrev, p1, p2,
2470 cachedelta=cachedelta,
2443 cachedelta=cachedelta,
2471 node=node, flags=flags,
2444 node=node, flags=flags,
2472 deltacomputer=deltacomputer)
2445 deltacomputer=deltacomputer)
2473 else:
2446 else:
2474 ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
2447 ifh = destrevlog.opener(destrevlog.indexfile, 'a+',
2475 checkambig=False)
2448 checkambig=False)
2476 dfh = None
2449 dfh = None
2477 if not destrevlog._inline:
2450 if not destrevlog._inline:
2478 dfh = destrevlog.opener(destrevlog.datafile, 'a+')
2451 dfh = destrevlog.opener(destrevlog.datafile, 'a+')
2479 try:
2452 try:
2480 destrevlog._addrevision(node, rawtext, tr, linkrev, p1,
2453 destrevlog._addrevision(node, rawtext, tr, linkrev, p1,
2481 p2, flags, cachedelta, ifh, dfh,
2454 p2, flags, cachedelta, ifh, dfh,
2482 deltacomputer=deltacomputer)
2455 deltacomputer=deltacomputer)
2483 finally:
2456 finally:
2484 if dfh:
2457 if dfh:
2485 dfh.close()
2458 dfh.close()
2486 ifh.close()
2459 ifh.close()
2487
2460
2488 if addrevisioncb:
2461 if addrevisioncb:
2489 addrevisioncb(self, rev, node)
2462 addrevisioncb(self, rev, node)
2490 finally:
2463 finally:
2491 destrevlog._lazydelta = oldlazydelta
2464 destrevlog._lazydelta = oldlazydelta
2492 destrevlog._lazydeltabase = oldlazydeltabase
2465 destrevlog._lazydeltabase = oldlazydeltabase
2493 destrevlog._deltabothparents = oldamd
2466 destrevlog._deltabothparents = oldamd
2494
2467
2495 def censorrevision(self, tr, censornode, tombstone=b''):
2468 def censorrevision(self, tr, censornode, tombstone=b''):
2496 if (self.version & 0xFFFF) == REVLOGV0:
2469 if (self.version & 0xFFFF) == REVLOGV0:
2497 raise error.RevlogError(_('cannot censor with version %d revlogs') %
2470 raise error.RevlogError(_('cannot censor with version %d revlogs') %
2498 self.version)
2471 self.version)
2499
2472
2500 censorrev = self.rev(censornode)
2473 censorrev = self.rev(censornode)
2501 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2474 tombstone = storageutil.packmeta({b'censored': tombstone}, b'')
2502
2475
2503 if len(tombstone) > self.rawsize(censorrev):
2476 if len(tombstone) > self.rawsize(censorrev):
2504 raise error.Abort(_('censor tombstone must be no longer than '
2477 raise error.Abort(_('censor tombstone must be no longer than '
2505 'censored data'))
2478 'censored data'))
2506
2479
2507 # Rewriting the revlog in place is hard. Our strategy for censoring is
2480 # Rewriting the revlog in place is hard. Our strategy for censoring is
2508 # to create a new revlog, copy all revisions to it, then replace the
2481 # to create a new revlog, copy all revisions to it, then replace the
2509 # revlogs on transaction close.
2482 # revlogs on transaction close.
2510
2483
2511 newindexfile = self.indexfile + b'.tmpcensored'
2484 newindexfile = self.indexfile + b'.tmpcensored'
2512 newdatafile = self.datafile + b'.tmpcensored'
2485 newdatafile = self.datafile + b'.tmpcensored'
2513
2486
2514 # This is a bit dangerous. We could easily have a mismatch of state.
2487 # This is a bit dangerous. We could easily have a mismatch of state.
2515 newrl = revlog(self.opener, newindexfile, newdatafile,
2488 newrl = revlog(self.opener, newindexfile, newdatafile,
2516 censorable=True)
2489 censorable=True)
2517 newrl.version = self.version
2490 newrl.version = self.version
2518 newrl._generaldelta = self._generaldelta
2491 newrl._generaldelta = self._generaldelta
2519 newrl._io = self._io
2492 newrl._io = self._io
2520
2493
2521 for rev in self.revs():
2494 for rev in self.revs():
2522 node = self.node(rev)
2495 node = self.node(rev)
2523 p1, p2 = self.parents(node)
2496 p1, p2 = self.parents(node)
2524
2497
2525 if rev == censorrev:
2498 if rev == censorrev:
2526 newrl.addrawrevision(tombstone, tr, self.linkrev(censorrev),
2499 newrl.addrawrevision(tombstone, tr, self.linkrev(censorrev),
2527 p1, p2, censornode, REVIDX_ISCENSORED)
2500 p1, p2, censornode, REVIDX_ISCENSORED)
2528
2501
2529 if newrl.deltaparent(rev) != nullrev:
2502 if newrl.deltaparent(rev) != nullrev:
2530 raise error.Abort(_('censored revision stored as delta; '
2503 raise error.Abort(_('censored revision stored as delta; '
2531 'cannot censor'),
2504 'cannot censor'),
2532 hint=_('censoring of revlogs is not '
2505 hint=_('censoring of revlogs is not '
2533 'fully implemented; please report '
2506 'fully implemented; please report '
2534 'this bug'))
2507 'this bug'))
2535 continue
2508 continue
2536
2509
2537 if self.iscensored(rev):
2510 if self.iscensored(rev):
2538 if self.deltaparent(rev) != nullrev:
2511 if self.deltaparent(rev) != nullrev:
2539 raise error.Abort(_('cannot censor due to censored '
2512 raise error.Abort(_('cannot censor due to censored '
2540 'revision having delta stored'))
2513 'revision having delta stored'))
2541 rawtext = self._chunk(rev)
2514 rawtext = self._chunk(rev)
2542 else:
2515 else:
2543 rawtext = self.revision(rev, raw=True)
2516 rawtext = self.revision(rev, raw=True)
2544
2517
2545 newrl.addrawrevision(rawtext, tr, self.linkrev(rev), p1, p2, node,
2518 newrl.addrawrevision(rawtext, tr, self.linkrev(rev), p1, p2, node,
2546 self.flags(rev))
2519 self.flags(rev))
2547
2520
2548 tr.addbackup(self.indexfile, location='store')
2521 tr.addbackup(self.indexfile, location='store')
2549 if not self._inline:
2522 if not self._inline:
2550 tr.addbackup(self.datafile, location='store')
2523 tr.addbackup(self.datafile, location='store')
2551
2524
2552 self.opener.rename(newrl.indexfile, self.indexfile)
2525 self.opener.rename(newrl.indexfile, self.indexfile)
2553 if not self._inline:
2526 if not self._inline:
2554 self.opener.rename(newrl.datafile, self.datafile)
2527 self.opener.rename(newrl.datafile, self.datafile)
2555
2528
2556 self.clearcaches()
2529 self.clearcaches()
2557 self._loadindex()
2530 self._loadindex()
2558
2531
2559 def verifyintegrity(self, state):
2532 def verifyintegrity(self, state):
2560 """Verifies the integrity of the revlog.
2533 """Verifies the integrity of the revlog.
2561
2534
2562 Yields ``revlogproblem`` instances describing problems that are
2535 Yields ``revlogproblem`` instances describing problems that are
2563 found.
2536 found.
2564 """
2537 """
2565 dd, di = self.checksize()
2538 dd, di = self.checksize()
2566 if dd:
2539 if dd:
2567 yield revlogproblem(error=_('data length off by %d bytes') % dd)
2540 yield revlogproblem(error=_('data length off by %d bytes') % dd)
2568 if di:
2541 if di:
2569 yield revlogproblem(error=_('index contains %d extra bytes') % di)
2542 yield revlogproblem(error=_('index contains %d extra bytes') % di)
2570
2543
2571 version = self.version & 0xFFFF
2544 version = self.version & 0xFFFF
2572
2545
2573 # The verifier tells us what version revlog we should be.
2546 # The verifier tells us what version revlog we should be.
2574 if version != state['expectedversion']:
2547 if version != state['expectedversion']:
2575 yield revlogproblem(
2548 yield revlogproblem(
2576 warning=_("warning: '%s' uses revlog format %d; expected %d") %
2549 warning=_("warning: '%s' uses revlog format %d; expected %d") %
2577 (self.indexfile, version, state['expectedversion']))
2550 (self.indexfile, version, state['expectedversion']))
2578
2551
2579 state['skipread'] = set()
2552 state['skipread'] = set()
2580
2553
2581 for rev in self:
2554 for rev in self:
2582 node = self.node(rev)
2555 node = self.node(rev)
2583
2556
2584 # Verify contents. 4 cases to care about:
2557 # Verify contents. 4 cases to care about:
2585 #
2558 #
2586 # common: the most common case
2559 # common: the most common case
2587 # rename: with a rename
2560 # rename: with a rename
2588 # meta: file content starts with b'\1\n', the metadata
2561 # meta: file content starts with b'\1\n', the metadata
2589 # header defined in filelog.py, but without a rename
2562 # header defined in filelog.py, but without a rename
2590 # ext: content stored externally
2563 # ext: content stored externally
2591 #
2564 #
2592 # More formally, their differences are shown below:
2565 # More formally, their differences are shown below:
2593 #
2566 #
2594 # | common | rename | meta | ext
2567 # | common | rename | meta | ext
2595 # -------------------------------------------------------
2568 # -------------------------------------------------------
2596 # flags() | 0 | 0 | 0 | not 0
2569 # flags() | 0 | 0 | 0 | not 0
2597 # renamed() | False | True | False | ?
2570 # renamed() | False | True | False | ?
2598 # rawtext[0:2]=='\1\n'| False | True | True | ?
2571 # rawtext[0:2]=='\1\n'| False | True | True | ?
2599 #
2572 #
2600 # "rawtext" means the raw text stored in revlog data, which
2573 # "rawtext" means the raw text stored in revlog data, which
2601 # could be retrieved by "revision(rev, raw=True)". "text"
2574 # could be retrieved by "revision(rev, raw=True)". "text"
2602 # mentioned below is "revision(rev, raw=False)".
2575 # mentioned below is "revision(rev, raw=False)".
2603 #
2576 #
2604 # There are 3 different lengths stored physically:
2577 # There are 3 different lengths stored physically:
2605 # 1. L1: rawsize, stored in revlog index
2578 # 1. L1: rawsize, stored in revlog index
2606 # 2. L2: len(rawtext), stored in revlog data
2579 # 2. L2: len(rawtext), stored in revlog data
2607 # 3. L3: len(text), stored in revlog data if flags==0, or
2580 # 3. L3: len(text), stored in revlog data if flags==0, or
2608 # possibly somewhere else if flags!=0
2581 # possibly somewhere else if flags!=0
2609 #
2582 #
2610 # L1 should be equal to L2. L3 could be different from them.
2583 # L1 should be equal to L2. L3 could be different from them.
2611 # "text" may or may not affect commit hash depending on flag
2584 # "text" may or may not affect commit hash depending on flag
2612 # processors (see revlog.addflagprocessor).
2585 # processors (see flagutil.addflagprocessor).
2613 #
2586 #
2614 # | common | rename | meta | ext
2587 # | common | rename | meta | ext
2615 # -------------------------------------------------
2588 # -------------------------------------------------
2616 # rawsize() | L1 | L1 | L1 | L1
2589 # rawsize() | L1 | L1 | L1 | L1
2617 # size() | L1 | L2-LM | L1(*) | L1 (?)
2590 # size() | L1 | L2-LM | L1(*) | L1 (?)
2618 # len(rawtext) | L2 | L2 | L2 | L2
2591 # len(rawtext) | L2 | L2 | L2 | L2
2619 # len(text) | L2 | L2 | L2 | L3
2592 # len(text) | L2 | L2 | L2 | L3
2620 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
2593 # len(read()) | L2 | L2-LM | L2-LM | L3 (?)
2621 #
2594 #
2622 # LM: length of metadata, depending on rawtext
2595 # LM: length of metadata, depending on rawtext
2623 # (*): not ideal, see comment in filelog.size
2596 # (*): not ideal, see comment in filelog.size
2624 # (?): could be "- len(meta)" if the resolved content has
2597 # (?): could be "- len(meta)" if the resolved content has
2625 # rename metadata
2598 # rename metadata
2626 #
2599 #
2627 # Checks needed to be done:
2600 # Checks needed to be done:
2628 # 1. length check: L1 == L2, in all cases.
2601 # 1. length check: L1 == L2, in all cases.
2629 # 2. hash check: depending on flag processor, we may need to
2602 # 2. hash check: depending on flag processor, we may need to
2630 # use either "text" (external), or "rawtext" (in revlog).
2603 # use either "text" (external), or "rawtext" (in revlog).
2631
2604
2632 try:
2605 try:
2633 skipflags = state.get('skipflags', 0)
2606 skipflags = state.get('skipflags', 0)
2634 if skipflags:
2607 if skipflags:
2635 skipflags &= self.flags(rev)
2608 skipflags &= self.flags(rev)
2636
2609
2637 if skipflags:
2610 if skipflags:
2638 state['skipread'].add(node)
2611 state['skipread'].add(node)
2639 else:
2612 else:
2640 # Side-effect: read content and verify hash.
2613 # Side-effect: read content and verify hash.
2641 self.revision(node)
2614 self.revision(node)
2642
2615
2643 l1 = self.rawsize(rev)
2616 l1 = self.rawsize(rev)
2644 l2 = len(self.revision(node, raw=True))
2617 l2 = len(self.revision(node, raw=True))
2645
2618
2646 if l1 != l2:
2619 if l1 != l2:
2647 yield revlogproblem(
2620 yield revlogproblem(
2648 error=_('unpacked size is %d, %d expected') % (l2, l1),
2621 error=_('unpacked size is %d, %d expected') % (l2, l1),
2649 node=node)
2622 node=node)
2650
2623
2651 except error.CensoredNodeError:
2624 except error.CensoredNodeError:
2652 if state['erroroncensored']:
2625 if state['erroroncensored']:
2653 yield revlogproblem(error=_('censored file data'),
2626 yield revlogproblem(error=_('censored file data'),
2654 node=node)
2627 node=node)
2655 state['skipread'].add(node)
2628 state['skipread'].add(node)
2656 except Exception as e:
2629 except Exception as e:
2657 yield revlogproblem(
2630 yield revlogproblem(
2658 error=_('unpacking %s: %s') % (short(node),
2631 error=_('unpacking %s: %s') % (short(node),
2659 stringutil.forcebytestr(e)),
2632 stringutil.forcebytestr(e)),
2660 node=node)
2633 node=node)
2661 state['skipread'].add(node)
2634 state['skipread'].add(node)
2662
2635
2663 def storageinfo(self, exclusivefiles=False, sharedfiles=False,
2636 def storageinfo(self, exclusivefiles=False, sharedfiles=False,
2664 revisionscount=False, trackedsize=False,
2637 revisionscount=False, trackedsize=False,
2665 storedsize=False):
2638 storedsize=False):
2666 d = {}
2639 d = {}
2667
2640
2668 if exclusivefiles:
2641 if exclusivefiles:
2669 d['exclusivefiles'] = [(self.opener, self.indexfile)]
2642 d['exclusivefiles'] = [(self.opener, self.indexfile)]
2670 if not self._inline:
2643 if not self._inline:
2671 d['exclusivefiles'].append((self.opener, self.datafile))
2644 d['exclusivefiles'].append((self.opener, self.datafile))
2672
2645
2673 if sharedfiles:
2646 if sharedfiles:
2674 d['sharedfiles'] = []
2647 d['sharedfiles'] = []
2675
2648
2676 if revisionscount:
2649 if revisionscount:
2677 d['revisionscount'] = len(self)
2650 d['revisionscount'] = len(self)
2678
2651
2679 if trackedsize:
2652 if trackedsize:
2680 d['trackedsize'] = sum(map(self.rawsize, iter(self)))
2653 d['trackedsize'] = sum(map(self.rawsize, iter(self)))
2681
2654
2682 if storedsize:
2655 if storedsize:
2683 d['storedsize'] = sum(self.opener.stat(path).st_size
2656 d['storedsize'] = sum(self.opener.stat(path).st_size
2684 for path in self.files())
2657 for path in self.files())
2685
2658
2686 return d
2659 return d
@@ -1,53 +1,80 b''
1 # flagutils.py - code to deal with revlog flags and their processors
1 # flagutils.py - code to deal with revlog flags and their processors
2 #
2 #
3 # Copyright 2016 Remi Chaintron <remi@fb.com>
3 # Copyright 2016 Remi Chaintron <remi@fb.com>
4 # Copyright 2016-2019 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 # Copyright 2016-2019 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 from ..i18n import _
11 from ..i18n import _
12
12
13 from .constants import (
13 from .constants import (
14 REVIDX_DEFAULT_FLAGS,
14 REVIDX_DEFAULT_FLAGS,
15 REVIDX_ELLIPSIS,
15 REVIDX_ELLIPSIS,
16 REVIDX_EXTSTORED,
16 REVIDX_EXTSTORED,
17 REVIDX_FLAGS_ORDER,
17 REVIDX_FLAGS_ORDER,
18 REVIDX_ISCENSORED,
18 REVIDX_ISCENSORED,
19 REVIDX_RAWTEXT_CHANGING_FLAGS,
19 REVIDX_RAWTEXT_CHANGING_FLAGS,
20 )
20 )
21
21
22 from .. import (
22 from .. import (
23 error,
23 error,
24 util
24 util
25 )
25 )
26
26
27 # blanked usage of all the name to prevent pyflakes constraints
27 # blanked usage of all the name to prevent pyflakes constraints
28 # We need these name available in the module for extensions.
28 # We need these name available in the module for extensions.
29 REVIDX_ISCENSORED
29 REVIDX_ISCENSORED
30 REVIDX_ELLIPSIS
30 REVIDX_ELLIPSIS
31 REVIDX_EXTSTORED
31 REVIDX_EXTSTORED
32 REVIDX_DEFAULT_FLAGS
32 REVIDX_DEFAULT_FLAGS
33 REVIDX_FLAGS_ORDER
33 REVIDX_FLAGS_ORDER
34 REVIDX_RAWTEXT_CHANGING_FLAGS
34 REVIDX_RAWTEXT_CHANGING_FLAGS
35
35
36 REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
36 REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)
37
37
38 # Store flag processors (cf. 'addflagprocessor()' to register)
38 # Store flag processors (cf. 'addflagprocessor()' to register)
39 flagprocessors = {
39 flagprocessors = {
40 REVIDX_ISCENSORED: None,
40 REVIDX_ISCENSORED: None,
41 }
41 }
42
42
43 def addflagprocessor(flag, processor):
44 """Register a flag processor on a revision data flag.
45
46 Invariant:
47 - Flags need to be defined in REVIDX_KNOWN_FLAGS and REVIDX_FLAGS_ORDER,
48 and REVIDX_RAWTEXT_CHANGING_FLAGS if they can alter rawtext.
49 - Only one flag processor can be registered on a specific flag.
50 - flagprocessors must be 3-tuples of functions (read, write, raw) with the
51 following signatures:
52 - (read) f(self, rawtext) -> text, bool
53 - (write) f(self, text) -> rawtext, bool
54 - (raw) f(self, rawtext) -> bool
55 "text" is presented to the user. "rawtext" is stored in revlog data, not
56 directly visible to the user.
57 The boolean returned by these transforms is used to determine whether
58 the returned text can be used for hash integrity checking. For example,
59 if "write" returns False, then "text" is used to generate hash. If
60 "write" returns True, that basically means "rawtext" returned by "write"
61 should be used to generate hash. Usually, "write" and "read" return
62 different booleans. And "raw" returns a same boolean as "write".
63
64 Note: The 'raw' transform is used for changegroup generation and in some
65 debug commands. In this case the transform only indicates whether the
66 contents can be used for hash integrity checks.
67 """
68 insertflagprocessor(flag, processor, flagprocessors)
69
43 def insertflagprocessor(flag, processor, flagprocessors):
70 def insertflagprocessor(flag, processor, flagprocessors):
44 if not flag & REVIDX_KNOWN_FLAGS:
71 if not flag & REVIDX_KNOWN_FLAGS:
45 msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
72 msg = _("cannot register processor on unknown flag '%#x'.") % (flag)
46 raise error.ProgrammingError(msg)
73 raise error.ProgrammingError(msg)
47 if flag not in REVIDX_FLAGS_ORDER:
74 if flag not in REVIDX_FLAGS_ORDER:
48 msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
75 msg = _("flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % (flag)
49 raise error.ProgrammingError(msg)
76 raise error.ProgrammingError(msg)
50 if flag in flagprocessors:
77 if flag in flagprocessors:
51 msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
78 msg = _("cannot register multiple processors on flag '%#x'.") % (flag)
52 raise error.Abort(msg)
79 raise error.Abort(msg)
53 flagprocessors[flag] = processor
80 flagprocessors[flag] = processor
@@ -1,139 +1,139 b''
1 # coding=UTF-8
1 # coding=UTF-8
2
2
3 from __future__ import absolute_import
3 from __future__ import absolute_import
4
4
5 import base64
5 import base64
6 import zlib
6 import zlib
7
7
8 from mercurial import (
8 from mercurial import (
9 changegroup,
9 changegroup,
10 exchange,
10 exchange,
11 extensions,
11 extensions,
12 revlog,
12 revlog,
13 util,
13 util,
14 )
14 )
15 from mercurial.revlogutils import (
15 from mercurial.revlogutils import (
16 flagutil,
16 flagutil,
17 )
17 )
18
18
19 # Test only: These flags are defined here only in the context of testing the
19 # Test only: These flags are defined here only in the context of testing the
20 # behavior of the flag processor. The canonical way to add flags is to get in
20 # behavior of the flag processor. The canonical way to add flags is to get in
21 # touch with the community and make them known in revlog.
21 # touch with the community and make them known in revlog.
22 REVIDX_NOOP = (1 << 3)
22 REVIDX_NOOP = (1 << 3)
23 REVIDX_BASE64 = (1 << 2)
23 REVIDX_BASE64 = (1 << 2)
24 REVIDX_GZIP = (1 << 1)
24 REVIDX_GZIP = (1 << 1)
25 REVIDX_FAIL = 1
25 REVIDX_FAIL = 1
26
26
27 def validatehash(self, text):
27 def validatehash(self, text):
28 return True
28 return True
29
29
30 def bypass(self, text):
30 def bypass(self, text):
31 return False
31 return False
32
32
33 def noopdonothing(self, text):
33 def noopdonothing(self, text):
34 return (text, True)
34 return (text, True)
35
35
36 def b64encode(self, text):
36 def b64encode(self, text):
37 return (base64.b64encode(text), False)
37 return (base64.b64encode(text), False)
38
38
39 def b64decode(self, text):
39 def b64decode(self, text):
40 return (base64.b64decode(text), True)
40 return (base64.b64decode(text), True)
41
41
42 def gzipcompress(self, text):
42 def gzipcompress(self, text):
43 return (zlib.compress(text), False)
43 return (zlib.compress(text), False)
44
44
45 def gzipdecompress(self, text):
45 def gzipdecompress(self, text):
46 return (zlib.decompress(text), True)
46 return (zlib.decompress(text), True)
47
47
48 def supportedoutgoingversions(orig, repo):
48 def supportedoutgoingversions(orig, repo):
49 versions = orig(repo)
49 versions = orig(repo)
50 versions.discard(b'01')
50 versions.discard(b'01')
51 versions.discard(b'02')
51 versions.discard(b'02')
52 versions.add(b'03')
52 versions.add(b'03')
53 return versions
53 return versions
54
54
55 def allsupportedversions(orig, ui):
55 def allsupportedversions(orig, ui):
56 versions = orig(ui)
56 versions = orig(ui)
57 versions.add(b'03')
57 versions.add(b'03')
58 return versions
58 return versions
59
59
60 def makewrappedfile(obj):
60 def makewrappedfile(obj):
61 class wrappedfile(obj.__class__):
61 class wrappedfile(obj.__class__):
62 def addrevision(self, text, transaction, link, p1, p2,
62 def addrevision(self, text, transaction, link, p1, p2,
63 cachedelta=None, node=None,
63 cachedelta=None, node=None,
64 flags=flagutil.REVIDX_DEFAULT_FLAGS):
64 flags=flagutil.REVIDX_DEFAULT_FLAGS):
65 if b'[NOOP]' in text:
65 if b'[NOOP]' in text:
66 flags |= REVIDX_NOOP
66 flags |= REVIDX_NOOP
67
67
68 if b'[BASE64]' in text:
68 if b'[BASE64]' in text:
69 flags |= REVIDX_BASE64
69 flags |= REVIDX_BASE64
70
70
71 if b'[GZIP]' in text:
71 if b'[GZIP]' in text:
72 flags |= REVIDX_GZIP
72 flags |= REVIDX_GZIP
73
73
74 # This addrevision wrapper is meant to add a flag we will not have
74 # This addrevision wrapper is meant to add a flag we will not have
75 # transforms registered for, ensuring we handle this error case.
75 # transforms registered for, ensuring we handle this error case.
76 if b'[FAIL]' in text:
76 if b'[FAIL]' in text:
77 flags |= REVIDX_FAIL
77 flags |= REVIDX_FAIL
78
78
79 return super(wrappedfile, self).addrevision(text, transaction, link,
79 return super(wrappedfile, self).addrevision(text, transaction, link,
80 p1, p2,
80 p1, p2,
81 cachedelta=cachedelta,
81 cachedelta=cachedelta,
82 node=node,
82 node=node,
83 flags=flags)
83 flags=flags)
84
84
85 obj.__class__ = wrappedfile
85 obj.__class__ = wrappedfile
86
86
87 def reposetup(ui, repo):
87 def reposetup(ui, repo):
88 class wrappingflagprocessorrepo(repo.__class__):
88 class wrappingflagprocessorrepo(repo.__class__):
89 def file(self, f):
89 def file(self, f):
90 orig = super(wrappingflagprocessorrepo, self).file(f)
90 orig = super(wrappingflagprocessorrepo, self).file(f)
91 makewrappedfile(orig)
91 makewrappedfile(orig)
92 return orig
92 return orig
93
93
94 repo.__class__ = wrappingflagprocessorrepo
94 repo.__class__ = wrappingflagprocessorrepo
95
95
96 def extsetup(ui):
96 def extsetup(ui):
97 # Enable changegroup3 for flags to be sent over the wire
97 # Enable changegroup3 for flags to be sent over the wire
98 wrapfunction = extensions.wrapfunction
98 wrapfunction = extensions.wrapfunction
99 wrapfunction(changegroup,
99 wrapfunction(changegroup,
100 'supportedoutgoingversions',
100 'supportedoutgoingversions',
101 supportedoutgoingversions)
101 supportedoutgoingversions)
102 wrapfunction(changegroup,
102 wrapfunction(changegroup,
103 'allsupportedversions',
103 'allsupportedversions',
104 allsupportedversions)
104 allsupportedversions)
105
105
106 # Teach revlog about our test flags
106 # Teach revlog about our test flags
107 flags = [REVIDX_NOOP, REVIDX_BASE64, REVIDX_GZIP, REVIDX_FAIL]
107 flags = [REVIDX_NOOP, REVIDX_BASE64, REVIDX_GZIP, REVIDX_FAIL]
108 flagutil.REVIDX_KNOWN_FLAGS |= util.bitsfrom(flags)
108 flagutil.REVIDX_KNOWN_FLAGS |= util.bitsfrom(flags)
109 revlog.REVIDX_FLAGS_ORDER.extend(flags)
109 revlog.REVIDX_FLAGS_ORDER.extend(flags)
110
110
111 # Teach exchange to use changegroup 3
111 # Teach exchange to use changegroup 3
112 for k in exchange._bundlespeccontentopts.keys():
112 for k in exchange._bundlespeccontentopts.keys():
113 exchange._bundlespeccontentopts[k][b"cg.version"] = b"03"
113 exchange._bundlespeccontentopts[k][b"cg.version"] = b"03"
114
114
115 # Register flag processors for each extension
115 # Register flag processors for each extension
116 revlog.addflagprocessor(
116 flagutil.addflagprocessor(
117 REVIDX_NOOP,
117 REVIDX_NOOP,
118 (
118 (
119 noopdonothing,
119 noopdonothing,
120 noopdonothing,
120 noopdonothing,
121 validatehash,
121 validatehash,
122 )
122 )
123 )
123 )
124 revlog.addflagprocessor(
124 flagutil.addflagprocessor(
125 REVIDX_BASE64,
125 REVIDX_BASE64,
126 (
126 (
127 b64decode,
127 b64decode,
128 b64encode,
128 b64encode,
129 bypass,
129 bypass,
130 ),
130 ),
131 )
131 )
132 revlog.addflagprocessor(
132 flagutil.addflagprocessor(
133 REVIDX_GZIP,
133 REVIDX_GZIP,
134 (
134 (
135 gzipdecompress,
135 gzipdecompress,
136 gzipcompress,
136 gzipcompress,
137 bypass
137 bypass
138 )
138 )
139 )
139 )
@@ -1,304 +1,304 b''
1 # Create server
1 # Create server
2 $ hg init server
2 $ hg init server
3 $ cd server
3 $ cd server
4 $ cat >> .hg/hgrc << EOF
4 $ cat >> .hg/hgrc << EOF
5 > [extensions]
5 > [extensions]
6 > extension=$TESTDIR/flagprocessorext.py
6 > extension=$TESTDIR/flagprocessorext.py
7 > EOF
7 > EOF
8 $ cd ../
8 $ cd ../
9
9
10 # Clone server and enable extensions
10 # Clone server and enable extensions
11 $ hg clone -q server client
11 $ hg clone -q server client
12 $ cd client
12 $ cd client
13 $ cat >> .hg/hgrc << EOF
13 $ cat >> .hg/hgrc << EOF
14 > [extensions]
14 > [extensions]
15 > extension=$TESTDIR/flagprocessorext.py
15 > extension=$TESTDIR/flagprocessorext.py
16 > EOF
16 > EOF
17
17
18 # Commit file that will trigger the noop extension
18 # Commit file that will trigger the noop extension
19 $ echo '[NOOP]' > noop
19 $ echo '[NOOP]' > noop
20 $ hg commit -Aqm "noop"
20 $ hg commit -Aqm "noop"
21
21
22 # Commit file that will trigger the base64 extension
22 # Commit file that will trigger the base64 extension
23 $ echo '[BASE64]' > base64
23 $ echo '[BASE64]' > base64
24 $ hg commit -Aqm 'base64'
24 $ hg commit -Aqm 'base64'
25
25
26 # Commit file that will trigger the gzip extension
26 # Commit file that will trigger the gzip extension
27 $ echo '[GZIP]' > gzip
27 $ echo '[GZIP]' > gzip
28 $ hg commit -Aqm 'gzip'
28 $ hg commit -Aqm 'gzip'
29
29
30 # Commit file that will trigger noop and base64
30 # Commit file that will trigger noop and base64
31 $ echo '[NOOP][BASE64]' > noop-base64
31 $ echo '[NOOP][BASE64]' > noop-base64
32 $ hg commit -Aqm 'noop+base64'
32 $ hg commit -Aqm 'noop+base64'
33
33
34 # Commit file that will trigger noop and gzip
34 # Commit file that will trigger noop and gzip
35 $ echo '[NOOP][GZIP]' > noop-gzip
35 $ echo '[NOOP][GZIP]' > noop-gzip
36 $ hg commit -Aqm 'noop+gzip'
36 $ hg commit -Aqm 'noop+gzip'
37
37
38 # Commit file that will trigger base64 and gzip
38 # Commit file that will trigger base64 and gzip
39 $ echo '[BASE64][GZIP]' > base64-gzip
39 $ echo '[BASE64][GZIP]' > base64-gzip
40 $ hg commit -Aqm 'base64+gzip'
40 $ hg commit -Aqm 'base64+gzip'
41
41
42 # Commit file that will trigger base64, gzip and noop
42 # Commit file that will trigger base64, gzip and noop
43 $ echo '[BASE64][GZIP][NOOP]' > base64-gzip-noop
43 $ echo '[BASE64][GZIP][NOOP]' > base64-gzip-noop
44 $ hg commit -Aqm 'base64+gzip+noop'
44 $ hg commit -Aqm 'base64+gzip+noop'
45
45
46 # TEST: ensure the revision data is consistent
46 # TEST: ensure the revision data is consistent
47 $ hg cat noop
47 $ hg cat noop
48 [NOOP]
48 [NOOP]
49 $ hg debugdata noop 0
49 $ hg debugdata noop 0
50 [NOOP]
50 [NOOP]
51
51
52 $ hg cat -r . base64
52 $ hg cat -r . base64
53 [BASE64]
53 [BASE64]
54 $ hg debugdata base64 0
54 $ hg debugdata base64 0
55 W0JBU0U2NF0K (no-eol)
55 W0JBU0U2NF0K (no-eol)
56
56
57 $ hg cat -r . gzip
57 $ hg cat -r . gzip
58 [GZIP]
58 [GZIP]
59 $ hg debugdata gzip 0
59 $ hg debugdata gzip 0
60 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
60 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
61
61
62 $ hg cat -r . noop-base64
62 $ hg cat -r . noop-base64
63 [NOOP][BASE64]
63 [NOOP][BASE64]
64 $ hg debugdata noop-base64 0
64 $ hg debugdata noop-base64 0
65 W05PT1BdW0JBU0U2NF0K (no-eol)
65 W05PT1BdW0JBU0U2NF0K (no-eol)
66
66
67 $ hg cat -r . noop-gzip
67 $ hg cat -r . noop-gzip
68 [NOOP][GZIP]
68 [NOOP][GZIP]
69 $ hg debugdata noop-gzip 0
69 $ hg debugdata noop-gzip 0
70 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
70 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
71
71
72 $ hg cat -r . base64-gzip
72 $ hg cat -r . base64-gzip
73 [BASE64][GZIP]
73 [BASE64][GZIP]
74 $ hg debugdata base64-gzip 0
74 $ hg debugdata base64-gzip 0
75 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
75 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
76
76
77 $ hg cat -r . base64-gzip-noop
77 $ hg cat -r . base64-gzip-noop
78 [BASE64][GZIP][NOOP]
78 [BASE64][GZIP][NOOP]
79 $ hg debugdata base64-gzip-noop 0
79 $ hg debugdata base64-gzip-noop 0
80 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
80 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
81
81
82 # Push to the server
82 # Push to the server
83 $ hg push
83 $ hg push
84 pushing to $TESTTMP/server
84 pushing to $TESTTMP/server
85 searching for changes
85 searching for changes
86 adding changesets
86 adding changesets
87 adding manifests
87 adding manifests
88 adding file changes
88 adding file changes
89 added 7 changesets with 7 changes to 7 files
89 added 7 changesets with 7 changes to 7 files
90
90
91 Ensure the data got to the server OK
91 Ensure the data got to the server OK
92
92
93 $ cd ../server
93 $ cd ../server
94 $ hg cat -r 6e48f4215d24 noop
94 $ hg cat -r 6e48f4215d24 noop
95 [NOOP]
95 [NOOP]
96 $ hg debugdata noop 0
96 $ hg debugdata noop 0
97 [NOOP]
97 [NOOP]
98
98
99 $ hg cat -r 6e48f4215d24 base64
99 $ hg cat -r 6e48f4215d24 base64
100 [BASE64]
100 [BASE64]
101 $ hg debugdata base64 0
101 $ hg debugdata base64 0
102 W0JBU0U2NF0K (no-eol)
102 W0JBU0U2NF0K (no-eol)
103
103
104 $ hg cat -r 6e48f4215d24 gzip
104 $ hg cat -r 6e48f4215d24 gzip
105 [GZIP]
105 [GZIP]
106 $ hg debugdata gzip 0
106 $ hg debugdata gzip 0
107 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
107 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
108
108
109 $ hg cat -r 6e48f4215d24 noop-base64
109 $ hg cat -r 6e48f4215d24 noop-base64
110 [NOOP][BASE64]
110 [NOOP][BASE64]
111 $ hg debugdata noop-base64 0
111 $ hg debugdata noop-base64 0
112 W05PT1BdW0JBU0U2NF0K (no-eol)
112 W05PT1BdW0JBU0U2NF0K (no-eol)
113
113
114 $ hg cat -r 6e48f4215d24 noop-gzip
114 $ hg cat -r 6e48f4215d24 noop-gzip
115 [NOOP][GZIP]
115 [NOOP][GZIP]
116 $ hg debugdata noop-gzip 0
116 $ hg debugdata noop-gzip 0
117 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
117 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
118
118
119 $ hg cat -r 6e48f4215d24 base64-gzip
119 $ hg cat -r 6e48f4215d24 base64-gzip
120 [BASE64][GZIP]
120 [BASE64][GZIP]
121 $ hg debugdata base64-gzip 0
121 $ hg debugdata base64-gzip 0
122 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
122 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
123
123
124 $ hg cat -r 6e48f4215d24 base64-gzip-noop
124 $ hg cat -r 6e48f4215d24 base64-gzip-noop
125 [BASE64][GZIP][NOOP]
125 [BASE64][GZIP][NOOP]
126 $ hg debugdata base64-gzip-noop 0
126 $ hg debugdata base64-gzip-noop 0
127 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
127 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
128
128
129 # Initialize new client (not cloning) and setup extension
129 # Initialize new client (not cloning) and setup extension
130 $ cd ..
130 $ cd ..
131 $ hg init client2
131 $ hg init client2
132 $ cd client2
132 $ cd client2
133 $ cat >> .hg/hgrc << EOF
133 $ cat >> .hg/hgrc << EOF
134 > [paths]
134 > [paths]
135 > default = $TESTTMP/server
135 > default = $TESTTMP/server
136 > [extensions]
136 > [extensions]
137 > extension=$TESTDIR/flagprocessorext.py
137 > extension=$TESTDIR/flagprocessorext.py
138 > EOF
138 > EOF
139
139
140 # Pull from server and update to latest revision
140 # Pull from server and update to latest revision
141 $ hg pull default
141 $ hg pull default
142 pulling from $TESTTMP/server
142 pulling from $TESTTMP/server
143 requesting all changes
143 requesting all changes
144 adding changesets
144 adding changesets
145 adding manifests
145 adding manifests
146 adding file changes
146 adding file changes
147 added 7 changesets with 7 changes to 7 files
147 added 7 changesets with 7 changes to 7 files
148 new changesets 07b1b9442c5b:6e48f4215d24
148 new changesets 07b1b9442c5b:6e48f4215d24
149 (run 'hg update' to get a working copy)
149 (run 'hg update' to get a working copy)
150 $ hg update
150 $ hg update
151 7 files updated, 0 files merged, 0 files removed, 0 files unresolved
151 7 files updated, 0 files merged, 0 files removed, 0 files unresolved
152
152
153 # TEST: ensure the revision data is consistent
153 # TEST: ensure the revision data is consistent
154 $ hg cat noop
154 $ hg cat noop
155 [NOOP]
155 [NOOP]
156 $ hg debugdata noop 0
156 $ hg debugdata noop 0
157 [NOOP]
157 [NOOP]
158
158
159 $ hg cat -r . base64
159 $ hg cat -r . base64
160 [BASE64]
160 [BASE64]
161 $ hg debugdata base64 0
161 $ hg debugdata base64 0
162 W0JBU0U2NF0K (no-eol)
162 W0JBU0U2NF0K (no-eol)
163
163
164 $ hg cat -r . gzip
164 $ hg cat -r . gzip
165 [GZIP]
165 [GZIP]
166 $ hg debugdata gzip 0
166 $ hg debugdata gzip 0
167 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
167 x\x9c\x8bv\x8f\xf2\x0c\x88\xe5\x02\x00\x08\xc8\x01\xfd (no-eol) (esc)
168
168
169 $ hg cat -r . noop-base64
169 $ hg cat -r . noop-base64
170 [NOOP][BASE64]
170 [NOOP][BASE64]
171 $ hg debugdata noop-base64 0
171 $ hg debugdata noop-base64 0
172 W05PT1BdW0JBU0U2NF0K (no-eol)
172 W05PT1BdW0JBU0U2NF0K (no-eol)
173
173
174 $ hg cat -r . noop-gzip
174 $ hg cat -r . noop-gzip
175 [NOOP][GZIP]
175 [NOOP][GZIP]
176 $ hg debugdata noop-gzip 0
176 $ hg debugdata noop-gzip 0
177 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
177 x\x9c\x8b\xf6\xf3\xf7\x0f\x88\x8dv\x8f\xf2\x0c\x88\xe5\x02\x00\x1dH\x03\xf1 (no-eol) (esc)
178
178
179 $ hg cat -r . base64-gzip
179 $ hg cat -r . base64-gzip
180 [BASE64][GZIP]
180 [BASE64][GZIP]
181 $ hg debugdata base64-gzip 0
181 $ hg debugdata base64-gzip 0
182 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
182 eJyLdnIMdjUziY12j/IMiOUCACLBBDo= (no-eol)
183
183
184 $ hg cat -r . base64-gzip-noop
184 $ hg cat -r . base64-gzip-noop
185 [BASE64][GZIP][NOOP]
185 [BASE64][GZIP][NOOP]
186 $ hg debugdata base64-gzip-noop 0
186 $ hg debugdata base64-gzip-noop 0
187 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
187 eJyLdnIMdjUziY12j/IMiI328/cPiOUCAESjBi4= (no-eol)
188
188
189 # TEST: ensure a missing processor is handled
189 # TEST: ensure a missing processor is handled
190 $ echo '[FAIL][BASE64][GZIP][NOOP]' > fail-base64-gzip-noop
190 $ echo '[FAIL][BASE64][GZIP][NOOP]' > fail-base64-gzip-noop
191 $ hg commit -Aqm 'fail+base64+gzip+noop'
191 $ hg commit -Aqm 'fail+base64+gzip+noop'
192 abort: missing processor for flag '0x1'!
192 abort: missing processor for flag '0x1'!
193 [255]
193 [255]
194 $ rm fail-base64-gzip-noop
194 $ rm fail-base64-gzip-noop
195
195
196 # TEST: ensure we cannot register several flag processors on the same flag
196 # TEST: ensure we cannot register several flag processors on the same flag
197 $ cat >> .hg/hgrc << EOF
197 $ cat >> .hg/hgrc << EOF
198 > [extensions]
198 > [extensions]
199 > extension=$TESTDIR/flagprocessorext.py
199 > extension=$TESTDIR/flagprocessorext.py
200 > duplicate=$TESTDIR/flagprocessorext.py
200 > duplicate=$TESTDIR/flagprocessorext.py
201 > EOF
201 > EOF
202 $ hg debugrebuilddirstate
202 $ hg debugrebuilddirstate
203 Traceback (most recent call last):
203 Traceback (most recent call last):
204 File "*/mercurial/extensions.py", line *, in _runextsetup (glob)
204 File "*/mercurial/extensions.py", line *, in _runextsetup (glob)
205 extsetup(ui)
205 extsetup(ui)
206 File "*/tests/flagprocessorext.py", line *, in extsetup (glob)
206 File "*/tests/flagprocessorext.py", line *, in extsetup (glob)
207 validatehash,
207 validatehash,
208 File "*/mercurial/revlog.py", line *, in addflagprocessor (glob)
208 File "*/mercurial/revlogutils/flagutil.py", line *, in addflagprocessor (glob)
209 flagutil.insertflagprocessor(flag, processor, flagutil.flagprocessors)
209 insertflagprocessor(flag, processor, flagprocessors)
210 File "*/mercurial/revlogutils/flagutil.py", line *, in insertflagprocessor (glob)
210 File "*/mercurial/revlogutils/flagutil.py", line *, in insertflagprocessor (glob)
211 raise error.Abort(msg)
211 raise error.Abort(msg)
212 mercurial.error.Abort: b"cannot register multiple processors on flag '0x8'." (py3 !)
212 mercurial.error.Abort: b"cannot register multiple processors on flag '0x8'." (py3 !)
213 Abort: cannot register multiple processors on flag '0x8'. (no-py3 !)
213 Abort: cannot register multiple processors on flag '0x8'. (no-py3 !)
214 *** failed to set up extension duplicate: cannot register multiple processors on flag '0x8'.
214 *** failed to set up extension duplicate: cannot register multiple processors on flag '0x8'.
215 $ hg st 2>&1 | egrep 'cannot register multiple processors|flagprocessorext'
215 $ hg st 2>&1 | egrep 'cannot register multiple processors|flagprocessorext'
216 File "*/tests/flagprocessorext.py", line *, in extsetup (glob)
216 File "*/tests/flagprocessorext.py", line *, in extsetup (glob)
217 mercurial.error.Abort: b"cannot register multiple processors on flag '0x8'." (py3 !)
217 mercurial.error.Abort: b"cannot register multiple processors on flag '0x8'." (py3 !)
218 Abort: cannot register multiple processors on flag '0x8'. (no-py3 !)
218 Abort: cannot register multiple processors on flag '0x8'. (no-py3 !)
219 *** failed to set up extension duplicate: cannot register multiple processors on flag '0x8'.
219 *** failed to set up extension duplicate: cannot register multiple processors on flag '0x8'.
220 File "*/tests/flagprocessorext.py", line *, in b64decode (glob)
220 File "*/tests/flagprocessorext.py", line *, in b64decode (glob)
221
221
222 $ cd ..
222 $ cd ..
223
223
224 # TEST: bundle repo
224 # TEST: bundle repo
225 $ hg init bundletest
225 $ hg init bundletest
226 $ cd bundletest
226 $ cd bundletest
227
227
228 $ cat >> .hg/hgrc << EOF
228 $ cat >> .hg/hgrc << EOF
229 > [extensions]
229 > [extensions]
230 > flagprocessor=$TESTDIR/flagprocessorext.py
230 > flagprocessor=$TESTDIR/flagprocessorext.py
231 > EOF
231 > EOF
232
232
233 $ for i in 0 single two three 4; do
233 $ for i in 0 single two three 4; do
234 > echo '[BASE64]a-bit-longer-'$i > base64
234 > echo '[BASE64]a-bit-longer-'$i > base64
235 > hg commit -m base64-$i -A base64
235 > hg commit -m base64-$i -A base64
236 > done
236 > done
237
237
238 $ hg update 2 -q
238 $ hg update 2 -q
239 $ echo '[BASE64]a-bit-longer-branching' > base64
239 $ echo '[BASE64]a-bit-longer-branching' > base64
240 $ hg commit -q -m branching
240 $ hg commit -q -m branching
241
241
242 #if repobundlerepo
242 #if repobundlerepo
243 $ hg bundle --base 1 bundle.hg
243 $ hg bundle --base 1 bundle.hg
244 4 changesets found
244 4 changesets found
245 $ hg --config extensions.strip= strip -r 2 --no-backup --force -q
245 $ hg --config extensions.strip= strip -r 2 --no-backup --force -q
246 $ hg -R bundle.hg log --stat -T '{rev} {desc}\n' base64
246 $ hg -R bundle.hg log --stat -T '{rev} {desc}\n' base64
247 5 branching
247 5 branching
248 base64 | 2 +-
248 base64 | 2 +-
249 1 files changed, 1 insertions(+), 1 deletions(-)
249 1 files changed, 1 insertions(+), 1 deletions(-)
250
250
251 4 base64-4
251 4 base64-4
252 base64 | 2 +-
252 base64 | 2 +-
253 1 files changed, 1 insertions(+), 1 deletions(-)
253 1 files changed, 1 insertions(+), 1 deletions(-)
254
254
255 3 base64-three
255 3 base64-three
256 base64 | 2 +-
256 base64 | 2 +-
257 1 files changed, 1 insertions(+), 1 deletions(-)
257 1 files changed, 1 insertions(+), 1 deletions(-)
258
258
259 2 base64-two
259 2 base64-two
260 base64 | 2 +-
260 base64 | 2 +-
261 1 files changed, 1 insertions(+), 1 deletions(-)
261 1 files changed, 1 insertions(+), 1 deletions(-)
262
262
263 1 base64-single
263 1 base64-single
264 base64 | 2 +-
264 base64 | 2 +-
265 1 files changed, 1 insertions(+), 1 deletions(-)
265 1 files changed, 1 insertions(+), 1 deletions(-)
266
266
267 0 base64-0
267 0 base64-0
268 base64 | 1 +
268 base64 | 1 +
269 1 files changed, 1 insertions(+), 0 deletions(-)
269 1 files changed, 1 insertions(+), 0 deletions(-)
270
270
271
271
272 $ hg bundle -R bundle.hg --base 1 bundle-again.hg -q
272 $ hg bundle -R bundle.hg --base 1 bundle-again.hg -q
273 $ hg -R bundle-again.hg log --stat -T '{rev} {desc}\n' base64
273 $ hg -R bundle-again.hg log --stat -T '{rev} {desc}\n' base64
274 5 branching
274 5 branching
275 base64 | 2 +-
275 base64 | 2 +-
276 1 files changed, 1 insertions(+), 1 deletions(-)
276 1 files changed, 1 insertions(+), 1 deletions(-)
277
277
278 4 base64-4
278 4 base64-4
279 base64 | 2 +-
279 base64 | 2 +-
280 1 files changed, 1 insertions(+), 1 deletions(-)
280 1 files changed, 1 insertions(+), 1 deletions(-)
281
281
282 3 base64-three
282 3 base64-three
283 base64 | 2 +-
283 base64 | 2 +-
284 1 files changed, 1 insertions(+), 1 deletions(-)
284 1 files changed, 1 insertions(+), 1 deletions(-)
285
285
286 2 base64-two
286 2 base64-two
287 base64 | 2 +-
287 base64 | 2 +-
288 1 files changed, 1 insertions(+), 1 deletions(-)
288 1 files changed, 1 insertions(+), 1 deletions(-)
289
289
290 1 base64-single
290 1 base64-single
291 base64 | 2 +-
291 base64 | 2 +-
292 1 files changed, 1 insertions(+), 1 deletions(-)
292 1 files changed, 1 insertions(+), 1 deletions(-)
293
293
294 0 base64-0
294 0 base64-0
295 base64 | 1 +
295 base64 | 1 +
296 1 files changed, 1 insertions(+), 0 deletions(-)
296 1 files changed, 1 insertions(+), 0 deletions(-)
297
297
298 $ rm bundle.hg bundle-again.hg
298 $ rm bundle.hg bundle-again.hg
299 #endif
299 #endif
300
300
301 # TEST: hg status
301 # TEST: hg status
302
302
303 $ hg status
303 $ hg status
304 $ hg diff
304 $ hg diff
@@ -1,451 +1,452 b''
1 # test revlog interaction about raw data (flagprocessor)
1 # test revlog interaction about raw data (flagprocessor)
2
2
3 from __future__ import absolute_import, print_function
3 from __future__ import absolute_import, print_function
4
4
5 import collections
5 import collections
6 import hashlib
6 import hashlib
7 import sys
7 import sys
8
8
9 from mercurial import (
9 from mercurial import (
10 encoding,
10 encoding,
11 node,
11 node,
12 revlog,
12 revlog,
13 transaction,
13 transaction,
14 vfs,
14 vfs,
15 )
15 )
16
16
17 from mercurial.revlogutils import (
17 from mercurial.revlogutils import (
18 deltas,
18 deltas,
19 flagutil,
19 )
20 )
20
21
21 # TESTTMP is optional. This makes it convenient to run without run-tests.py
22 # TESTTMP is optional. This makes it convenient to run without run-tests.py
22 tvfs = vfs.vfs(encoding.environ.get(b'TESTTMP', b'/tmp'))
23 tvfs = vfs.vfs(encoding.environ.get(b'TESTTMP', b'/tmp'))
23
24
24 # Enable generaldelta otherwise revlog won't use delta as expected by the test
25 # Enable generaldelta otherwise revlog won't use delta as expected by the test
25 tvfs.options = {b'generaldelta': True, b'revlogv1': True,
26 tvfs.options = {b'generaldelta': True, b'revlogv1': True,
26 b'sparse-revlog': True}
27 b'sparse-revlog': True}
27
28
28 # The test wants to control whether to use delta explicitly, based on
29 # The test wants to control whether to use delta explicitly, based on
29 # "storedeltachains".
30 # "storedeltachains".
30 revlog.revlog._isgooddeltainfo = lambda self, d, textlen: self._storedeltachains
31 revlog.revlog._isgooddeltainfo = lambda self, d, textlen: self._storedeltachains
31
32
32 def abort(msg):
33 def abort(msg):
33 print('abort: %s' % msg)
34 print('abort: %s' % msg)
34 # Return 0 so run-tests.py could compare the output.
35 # Return 0 so run-tests.py could compare the output.
35 sys.exit()
36 sys.exit()
36
37
37 # Register a revlog processor for flag EXTSTORED.
38 # Register a revlog processor for flag EXTSTORED.
38 #
39 #
39 # It simply prepends a fixed header, and replaces '1' to 'i'. So it has
40 # It simply prepends a fixed header, and replaces '1' to 'i'. So it has
40 # insertion and replacement, and may be interesting to test revlog's line-based
41 # insertion and replacement, and may be interesting to test revlog's line-based
41 # deltas.
42 # deltas.
42 _extheader = b'E\n'
43 _extheader = b'E\n'
43
44
44 def readprocessor(self, rawtext):
45 def readprocessor(self, rawtext):
45 # True: the returned text could be used to verify hash
46 # True: the returned text could be used to verify hash
46 text = rawtext[len(_extheader):].replace(b'i', b'1')
47 text = rawtext[len(_extheader):].replace(b'i', b'1')
47 return text, True
48 return text, True
48
49
49 def writeprocessor(self, text):
50 def writeprocessor(self, text):
50 # False: the returned rawtext shouldn't be used to verify hash
51 # False: the returned rawtext shouldn't be used to verify hash
51 rawtext = _extheader + text.replace(b'1', b'i')
52 rawtext = _extheader + text.replace(b'1', b'i')
52 return rawtext, False
53 return rawtext, False
53
54
54 def rawprocessor(self, rawtext):
55 def rawprocessor(self, rawtext):
55 # False: do not verify hash. Only the content returned by "readprocessor"
56 # False: do not verify hash. Only the content returned by "readprocessor"
56 # can be used to verify hash.
57 # can be used to verify hash.
57 return False
58 return False
58
59
59 revlog.addflagprocessor(revlog.REVIDX_EXTSTORED,
60 flagutil.addflagprocessor(revlog.REVIDX_EXTSTORED,
60 (readprocessor, writeprocessor, rawprocessor))
61 (readprocessor, writeprocessor, rawprocessor))
61
62
62 # Utilities about reading and appending revlog
63 # Utilities about reading and appending revlog
63
64
64 def newtransaction():
65 def newtransaction():
65 # A transaction is required to write revlogs
66 # A transaction is required to write revlogs
66 report = lambda msg: None
67 report = lambda msg: None
67 return transaction.transaction(report, tvfs, {'plain': tvfs}, b'journal')
68 return transaction.transaction(report, tvfs, {'plain': tvfs}, b'journal')
68
69
69 def newrevlog(name=b'_testrevlog.i', recreate=False):
70 def newrevlog(name=b'_testrevlog.i', recreate=False):
70 if recreate:
71 if recreate:
71 tvfs.tryunlink(name)
72 tvfs.tryunlink(name)
72 rlog = revlog.revlog(tvfs, name)
73 rlog = revlog.revlog(tvfs, name)
73 return rlog
74 return rlog
74
75
75 def appendrev(rlog, text, tr, isext=False, isdelta=True):
76 def appendrev(rlog, text, tr, isext=False, isdelta=True):
76 '''Append a revision. If isext is True, set the EXTSTORED flag so flag
77 '''Append a revision. If isext is True, set the EXTSTORED flag so flag
77 processor will be used (and rawtext is different from text). If isdelta is
78 processor will be used (and rawtext is different from text). If isdelta is
78 True, force the revision to be a delta, otherwise it's full text.
79 True, force the revision to be a delta, otherwise it's full text.
79 '''
80 '''
80 nextrev = len(rlog)
81 nextrev = len(rlog)
81 p1 = rlog.node(nextrev - 1)
82 p1 = rlog.node(nextrev - 1)
82 p2 = node.nullid
83 p2 = node.nullid
83 if isext:
84 if isext:
84 flags = revlog.REVIDX_EXTSTORED
85 flags = revlog.REVIDX_EXTSTORED
85 else:
86 else:
86 flags = revlog.REVIDX_DEFAULT_FLAGS
87 flags = revlog.REVIDX_DEFAULT_FLAGS
87 # Change storedeltachains temporarily, to override revlog's delta decision
88 # Change storedeltachains temporarily, to override revlog's delta decision
88 rlog._storedeltachains = isdelta
89 rlog._storedeltachains = isdelta
89 try:
90 try:
90 rlog.addrevision(text, tr, nextrev, p1, p2, flags=flags)
91 rlog.addrevision(text, tr, nextrev, p1, p2, flags=flags)
91 return nextrev
92 return nextrev
92 except Exception as ex:
93 except Exception as ex:
93 abort('rev %d: failed to append: %s' % (nextrev, ex))
94 abort('rev %d: failed to append: %s' % (nextrev, ex))
94 finally:
95 finally:
95 # Restore storedeltachains. It is always True, see revlog.__init__
96 # Restore storedeltachains. It is always True, see revlog.__init__
96 rlog._storedeltachains = True
97 rlog._storedeltachains = True
97
98
98 def addgroupcopy(rlog, tr, destname=b'_destrevlog.i', optimaldelta=True):
99 def addgroupcopy(rlog, tr, destname=b'_destrevlog.i', optimaldelta=True):
99 '''Copy revlog to destname using revlog.addgroup. Return the copied revlog.
100 '''Copy revlog to destname using revlog.addgroup. Return the copied revlog.
100
101
101 This emulates push or pull. They use changegroup. Changegroup requires
102 This emulates push or pull. They use changegroup. Changegroup requires
102 repo to work. We don't have a repo, so a dummy changegroup is used.
103 repo to work. We don't have a repo, so a dummy changegroup is used.
103
104
104 If optimaldelta is True, use optimized delta parent, so the destination
105 If optimaldelta is True, use optimized delta parent, so the destination
105 revlog could probably reuse it. Otherwise it builds sub-optimal delta, and
106 revlog could probably reuse it. Otherwise it builds sub-optimal delta, and
106 the destination revlog needs more work to use it.
107 the destination revlog needs more work to use it.
107
108
108 This exercises some revlog.addgroup (and revlog._addrevision(text=None))
109 This exercises some revlog.addgroup (and revlog._addrevision(text=None))
109 code path, which is not covered by "appendrev" alone.
110 code path, which is not covered by "appendrev" alone.
110 '''
111 '''
111 class dummychangegroup(object):
112 class dummychangegroup(object):
112 @staticmethod
113 @staticmethod
113 def deltachunk(pnode):
114 def deltachunk(pnode):
114 pnode = pnode or node.nullid
115 pnode = pnode or node.nullid
115 parentrev = rlog.rev(pnode)
116 parentrev = rlog.rev(pnode)
116 r = parentrev + 1
117 r = parentrev + 1
117 if r >= len(rlog):
118 if r >= len(rlog):
118 return {}
119 return {}
119 if optimaldelta:
120 if optimaldelta:
120 deltaparent = parentrev
121 deltaparent = parentrev
121 else:
122 else:
122 # suboptimal deltaparent
123 # suboptimal deltaparent
123 deltaparent = min(0, parentrev)
124 deltaparent = min(0, parentrev)
124 if not rlog.candelta(deltaparent, r):
125 if not rlog.candelta(deltaparent, r):
125 deltaparent = -1
126 deltaparent = -1
126 return {b'node': rlog.node(r), b'p1': pnode, b'p2': node.nullid,
127 return {b'node': rlog.node(r), b'p1': pnode, b'p2': node.nullid,
127 b'cs': rlog.node(rlog.linkrev(r)), b'flags': rlog.flags(r),
128 b'cs': rlog.node(rlog.linkrev(r)), b'flags': rlog.flags(r),
128 b'deltabase': rlog.node(deltaparent),
129 b'deltabase': rlog.node(deltaparent),
129 b'delta': rlog.revdiff(deltaparent, r)}
130 b'delta': rlog.revdiff(deltaparent, r)}
130
131
131 def deltaiter(self):
132 def deltaiter(self):
132 chain = None
133 chain = None
133 for chunkdata in iter(lambda: self.deltachunk(chain), {}):
134 for chunkdata in iter(lambda: self.deltachunk(chain), {}):
134 node = chunkdata[b'node']
135 node = chunkdata[b'node']
135 p1 = chunkdata[b'p1']
136 p1 = chunkdata[b'p1']
136 p2 = chunkdata[b'p2']
137 p2 = chunkdata[b'p2']
137 cs = chunkdata[b'cs']
138 cs = chunkdata[b'cs']
138 deltabase = chunkdata[b'deltabase']
139 deltabase = chunkdata[b'deltabase']
139 delta = chunkdata[b'delta']
140 delta = chunkdata[b'delta']
140 flags = chunkdata[b'flags']
141 flags = chunkdata[b'flags']
141
142
142 chain = node
143 chain = node
143
144
144 yield (node, p1, p2, cs, deltabase, delta, flags)
145 yield (node, p1, p2, cs, deltabase, delta, flags)
145
146
146 def linkmap(lnode):
147 def linkmap(lnode):
147 return rlog.rev(lnode)
148 return rlog.rev(lnode)
148
149
149 dlog = newrevlog(destname, recreate=True)
150 dlog = newrevlog(destname, recreate=True)
150 dummydeltas = dummychangegroup().deltaiter()
151 dummydeltas = dummychangegroup().deltaiter()
151 dlog.addgroup(dummydeltas, linkmap, tr)
152 dlog.addgroup(dummydeltas, linkmap, tr)
152 return dlog
153 return dlog
153
154
154 def lowlevelcopy(rlog, tr, destname=b'_destrevlog.i'):
155 def lowlevelcopy(rlog, tr, destname=b'_destrevlog.i'):
155 '''Like addgroupcopy, but use the low level revlog._addrevision directly.
156 '''Like addgroupcopy, but use the low level revlog._addrevision directly.
156
157
157 It exercises some code paths that are hard to reach easily otherwise.
158 It exercises some code paths that are hard to reach easily otherwise.
158 '''
159 '''
159 dlog = newrevlog(destname, recreate=True)
160 dlog = newrevlog(destname, recreate=True)
160 for r in rlog:
161 for r in rlog:
161 p1 = rlog.node(r - 1)
162 p1 = rlog.node(r - 1)
162 p2 = node.nullid
163 p2 = node.nullid
163 if r == 0 or (rlog.flags(r) & revlog.REVIDX_EXTSTORED):
164 if r == 0 or (rlog.flags(r) & revlog.REVIDX_EXTSTORED):
164 text = rlog.revision(r, raw=True)
165 text = rlog.revision(r, raw=True)
165 cachedelta = None
166 cachedelta = None
166 else:
167 else:
167 # deltaparent cannot have EXTSTORED flag.
168 # deltaparent cannot have EXTSTORED flag.
168 deltaparent = max([-1] +
169 deltaparent = max([-1] +
169 [p for p in range(r)
170 [p for p in range(r)
170 if rlog.flags(p) & revlog.REVIDX_EXTSTORED == 0])
171 if rlog.flags(p) & revlog.REVIDX_EXTSTORED == 0])
171 text = None
172 text = None
172 cachedelta = (deltaparent, rlog.revdiff(deltaparent, r))
173 cachedelta = (deltaparent, rlog.revdiff(deltaparent, r))
173 flags = rlog.flags(r)
174 flags = rlog.flags(r)
174 ifh = dfh = None
175 ifh = dfh = None
175 try:
176 try:
176 ifh = dlog.opener(dlog.indexfile, b'a+')
177 ifh = dlog.opener(dlog.indexfile, b'a+')
177 if not dlog._inline:
178 if not dlog._inline:
178 dfh = dlog.opener(dlog.datafile, b'a+')
179 dfh = dlog.opener(dlog.datafile, b'a+')
179 dlog._addrevision(rlog.node(r), text, tr, r, p1, p2, flags,
180 dlog._addrevision(rlog.node(r), text, tr, r, p1, p2, flags,
180 cachedelta, ifh, dfh)
181 cachedelta, ifh, dfh)
181 finally:
182 finally:
182 if dfh is not None:
183 if dfh is not None:
183 dfh.close()
184 dfh.close()
184 if ifh is not None:
185 if ifh is not None:
185 ifh.close()
186 ifh.close()
186 return dlog
187 return dlog
187
188
188 # Utilities to generate revisions for testing
189 # Utilities to generate revisions for testing
189
190
190 def genbits(n):
191 def genbits(n):
191 '''Given a number n, generate (2 ** (n * 2) + 1) numbers in range(2 ** n).
192 '''Given a number n, generate (2 ** (n * 2) + 1) numbers in range(2 ** n).
192 i.e. the generated numbers have a width of n bits.
193 i.e. the generated numbers have a width of n bits.
193
194
194 The combination of two adjacent numbers will cover all possible cases.
195 The combination of two adjacent numbers will cover all possible cases.
195 That is to say, given any x, y where both x, and y are in range(2 ** n),
196 That is to say, given any x, y where both x, and y are in range(2 ** n),
196 there is an x followed immediately by y in the generated sequence.
197 there is an x followed immediately by y in the generated sequence.
197 '''
198 '''
198 m = 2 ** n
199 m = 2 ** n
199
200
200 # Gray Code. See https://en.wikipedia.org/wiki/Gray_code
201 # Gray Code. See https://en.wikipedia.org/wiki/Gray_code
201 gray = lambda x: x ^ (x >> 1)
202 gray = lambda x: x ^ (x >> 1)
202 reversegray = dict((gray(i), i) for i in range(m))
203 reversegray = dict((gray(i), i) for i in range(m))
203
204
204 # Generate (n * 2) bit gray code, yield lower n bits as X, and look for
205 # Generate (n * 2) bit gray code, yield lower n bits as X, and look for
205 # the next unused gray code where higher n bits equal to X.
206 # the next unused gray code where higher n bits equal to X.
206
207
207 # For gray codes whose higher bits are X, a[X] of them have been used.
208 # For gray codes whose higher bits are X, a[X] of them have been used.
208 a = [0] * m
209 a = [0] * m
209
210
210 # Iterate from 0.
211 # Iterate from 0.
211 x = 0
212 x = 0
212 yield x
213 yield x
213 for i in range(m * m):
214 for i in range(m * m):
214 x = reversegray[x]
215 x = reversegray[x]
215 y = gray(a[x] + x * m) & (m - 1)
216 y = gray(a[x] + x * m) & (m - 1)
216 assert a[x] < m
217 assert a[x] < m
217 a[x] += 1
218 a[x] += 1
218 x = y
219 x = y
219 yield x
220 yield x
220
221
221 def gentext(rev):
222 def gentext(rev):
222 '''Given a revision number, generate dummy text'''
223 '''Given a revision number, generate dummy text'''
223 return b''.join(b'%d\n' % j for j in range(-1, rev % 5))
224 return b''.join(b'%d\n' % j for j in range(-1, rev % 5))
224
225
225 def writecases(rlog, tr):
226 def writecases(rlog, tr):
226 '''Write some revisions interested to the test.
227 '''Write some revisions interested to the test.
227
228
228 The test is interested in 3 properties of a revision:
229 The test is interested in 3 properties of a revision:
229
230
230 - Is it a delta or a full text? (isdelta)
231 - Is it a delta or a full text? (isdelta)
231 This is to catch some delta application issues.
232 This is to catch some delta application issues.
232 - Does it have a flag of EXTSTORED? (isext)
233 - Does it have a flag of EXTSTORED? (isext)
233 This is to catch some flag processor issues. Especially when
234 This is to catch some flag processor issues. Especially when
234 interacted with revlog deltas.
235 interacted with revlog deltas.
235 - Is its text empty? (isempty)
236 - Is its text empty? (isempty)
236 This is less important. It is intended to try to catch some careless
237 This is less important. It is intended to try to catch some careless
237 checks like "if text" instead of "if text is None". Note: if flag
238 checks like "if text" instead of "if text is None". Note: if flag
238 processor is involved, raw text may be not empty.
239 processor is involved, raw text may be not empty.
239
240
240 Write 65 revisions. So that all combinations of the above flags for
241 Write 65 revisions. So that all combinations of the above flags for
241 adjacent revisions are covered. That is to say,
242 adjacent revisions are covered. That is to say,
242
243
243 len(set(
244 len(set(
244 (r.delta, r.ext, r.empty, (r+1).delta, (r+1).ext, (r+1).empty)
245 (r.delta, r.ext, r.empty, (r+1).delta, (r+1).ext, (r+1).empty)
245 for r in range(len(rlog) - 1)
246 for r in range(len(rlog) - 1)
246 )) is 64.
247 )) is 64.
247
248
248 Where "r.delta", "r.ext", and "r.empty" are booleans matching properties
249 Where "r.delta", "r.ext", and "r.empty" are booleans matching properties
249 mentioned above.
250 mentioned above.
250
251
251 Return expected [(text, rawtext)].
252 Return expected [(text, rawtext)].
252 '''
253 '''
253 result = []
254 result = []
254 for i, x in enumerate(genbits(3)):
255 for i, x in enumerate(genbits(3)):
255 isdelta, isext, isempty = bool(x & 1), bool(x & 2), bool(x & 4)
256 isdelta, isext, isempty = bool(x & 1), bool(x & 2), bool(x & 4)
256 if isempty:
257 if isempty:
257 text = b''
258 text = b''
258 else:
259 else:
259 text = gentext(i)
260 text = gentext(i)
260 rev = appendrev(rlog, text, tr, isext=isext, isdelta=isdelta)
261 rev = appendrev(rlog, text, tr, isext=isext, isdelta=isdelta)
261
262
262 # Verify text, rawtext, and rawsize
263 # Verify text, rawtext, and rawsize
263 if isext:
264 if isext:
264 rawtext = writeprocessor(None, text)[0]
265 rawtext = writeprocessor(None, text)[0]
265 else:
266 else:
266 rawtext = text
267 rawtext = text
267 if rlog.rawsize(rev) != len(rawtext):
268 if rlog.rawsize(rev) != len(rawtext):
268 abort('rev %d: wrong rawsize' % rev)
269 abort('rev %d: wrong rawsize' % rev)
269 if rlog.revision(rev, raw=False) != text:
270 if rlog.revision(rev, raw=False) != text:
270 abort('rev %d: wrong text' % rev)
271 abort('rev %d: wrong text' % rev)
271 if rlog.revision(rev, raw=True) != rawtext:
272 if rlog.revision(rev, raw=True) != rawtext:
272 abort('rev %d: wrong rawtext' % rev)
273 abort('rev %d: wrong rawtext' % rev)
273 result.append((text, rawtext))
274 result.append((text, rawtext))
274
275
275 # Verify flags like isdelta, isext work as expected
276 # Verify flags like isdelta, isext work as expected
276 # isdelta can be overridden to False if this or p1 has isext set
277 # isdelta can be overridden to False if this or p1 has isext set
277 if bool(rlog.deltaparent(rev) > -1) and not isdelta:
278 if bool(rlog.deltaparent(rev) > -1) and not isdelta:
278 abort('rev %d: isdelta is unexpected' % rev)
279 abort('rev %d: isdelta is unexpected' % rev)
279 if bool(rlog.flags(rev)) != isext:
280 if bool(rlog.flags(rev)) != isext:
280 abort('rev %d: isext is ineffective' % rev)
281 abort('rev %d: isext is ineffective' % rev)
281 return result
282 return result
282
283
283 # Main test and checking
284 # Main test and checking
284
285
285 def checkrevlog(rlog, expected):
286 def checkrevlog(rlog, expected):
286 '''Check if revlog has expected contents. expected is [(text, rawtext)]'''
287 '''Check if revlog has expected contents. expected is [(text, rawtext)]'''
287 # Test using different access orders. This could expose some issues
288 # Test using different access orders. This could expose some issues
288 # depending on revlog caching (see revlog._cache).
289 # depending on revlog caching (see revlog._cache).
289 for r0 in range(len(rlog) - 1):
290 for r0 in range(len(rlog) - 1):
290 r1 = r0 + 1
291 r1 = r0 + 1
291 for revorder in [[r0, r1], [r1, r0]]:
292 for revorder in [[r0, r1], [r1, r0]]:
292 for raworder in [[True], [False], [True, False], [False, True]]:
293 for raworder in [[True], [False], [True, False], [False, True]]:
293 nlog = newrevlog()
294 nlog = newrevlog()
294 for rev in revorder:
295 for rev in revorder:
295 for raw in raworder:
296 for raw in raworder:
296 t = nlog.revision(rev, raw=raw)
297 t = nlog.revision(rev, raw=raw)
297 if t != expected[rev][int(raw)]:
298 if t != expected[rev][int(raw)]:
298 abort('rev %d: corrupted %stext'
299 abort('rev %d: corrupted %stext'
299 % (rev, raw and 'raw' or ''))
300 % (rev, raw and 'raw' or ''))
300
301
301 slicingdata = [
302 slicingdata = [
302 ([0, 1, 2, 3, 55, 56, 58, 59, 60],
303 ([0, 1, 2, 3, 55, 56, 58, 59, 60],
303 [[0, 1], [2], [58], [59, 60]],
304 [[0, 1], [2], [58], [59, 60]],
304 10),
305 10),
305 ([0, 1, 2, 3, 55, 56, 58, 59, 60],
306 ([0, 1, 2, 3, 55, 56, 58, 59, 60],
306 [[0, 1], [2], [58], [59, 60]],
307 [[0, 1], [2], [58], [59, 60]],
307 10),
308 10),
308 ([-1, 0, 1, 2, 3, 55, 56, 58, 59, 60],
309 ([-1, 0, 1, 2, 3, 55, 56, 58, 59, 60],
309 [[-1, 0, 1], [2], [58], [59, 60]],
310 [[-1, 0, 1], [2], [58], [59, 60]],
310 10),
311 10),
311 ]
312 ]
312
313
def slicingtest(rlog):
    """Check deltas.slicechunk against each case in ``slicingdata``.

    Mismatches are reported on stdout; nothing is returned.
    """
    saved = rlog._srmingapsize
    try:
        # the test revlog is tiny, so drop the size floor below which
        # slicing would otherwise be disregarded
        rlog._srmingapsize = 0
        for chain, expected, target in slicingdata:
            sliced = list(deltas.slicechunk(rlog, chain, targetsize=target))
            if sliced == expected:
                continue
            print('slicing differ:')
            print(' chain: %s' % chain)
            print(' target: %s' % target)
            print(' expected: %s' % expected)
            print(' result: %s' % sliced)
    finally:
        rlog._srmingapsize = saved
331
332
def md5sum(s):
    """Return the raw (16-byte) MD5 digest of byte string *s*."""
    hasher = hashlib.md5(s)
    return hasher.digest()
334
335
def _maketext(*coord):
    """Create a piece of text from ranges of integers.

    Each ``(start, size)`` pair in *coord* contributes one chunk made of the
    md5 digests of the integers ``start .. start + size - 1``, one per line.
    Using digests keeps the produced text poorly compressible.
    """
    chunks = []
    for start, size in coord:
        digests = [md5sum(b'%d' % n) for n in range(start, start + size)]
        chunks.append(b'\n'.join(digests))
    return b'\n'.join(chunks) + b'\n'
346
347
# Revision contents fed to makesnapshot; the overlapping integer ranges are
# crafted so the resulting revlog exhibits the snapshot structure expected
# by issnapshottest/findsnapshottest below.
data = [
    _maketext((0, 120), (456, 60)),
    _maketext((0, 120), (345, 60)),
    _maketext((0, 120), (734, 60)),
    _maketext((0, 120), (734, 60), (923, 45)),
    _maketext((0, 120), (734, 60), (234, 45)),
    _maketext((0, 120), (734, 60), (564, 45)),
    _maketext((0, 120), (734, 60), (361, 45)),
    _maketext((0, 120), (734, 60), (489, 45)),
    _maketext((0, 120), (123, 60)),
    _maketext((0, 120), (145, 60)),
    _maketext((0, 120), (104, 60)),
    _maketext((0, 120), (430, 60)),
    _maketext((0, 120), (430, 60), (923, 45)),
    _maketext((0, 120), (430, 60), (234, 45)),
    _maketext((0, 120), (430, 60), (564, 45)),
    _maketext((0, 120), (430, 60), (361, 45)),
    _maketext((0, 120), (430, 60), (489, 45)),
    _maketext((0, 120), (249, 60)),
    _maketext((0, 120), (832, 60)),
    _maketext((0, 120), (891, 60)),
    _maketext((0, 120), (543, 60)),
    _maketext((0, 120), (120, 60)),
    _maketext((0, 120), (60, 60), (768, 30)),
    _maketext((0, 120), (60, 60), (260, 30)),
    _maketext((0, 120), (60, 60), (450, 30)),
    _maketext((0, 120), (60, 60), (361, 30)),
    _maketext((0, 120), (60, 60), (886, 30)),
    _maketext((0, 120), (60, 60), (116, 30)),
    _maketext((0, 120), (60, 60), (567, 30), (629, 40)),
    _maketext((0, 120), (60, 60), (569, 30), (745, 40)),
    _maketext((0, 120), (60, 60), (777, 30), (700, 40)),
    _maketext((0, 120), (60, 60), (618, 30), (398, 40), (158, 10)),
]
381
382
def makesnapshot(tr):
    """Build a fresh revlog containing one revision per entry of ``data``.

    *tr* is the transaction the revisions are appended under.
    """
    rl = newrevlog(name=b'_snaprevlog3.i', recreate=True)
    for text in data:
        appendrev(rl, text, tr)
    return rl
387
388
# revisions expected to be snapshots in the revlog built by makesnapshot
# (-1 stands for the null revision)
snapshots = [-1, 0, 6, 8, 11, 17, 19, 21, 25, 30]
def issnapshottest(rlog):
    """Compare rlog.issnapshot() over all revisions against ``snapshots``.

    A mismatch is reported on stdout; nothing is returned.
    """
    found = [rev for rev in [-1] + list(rlog) if rlog.issnapshot(rev)]
    if found != snapshots:
        print('snapshot differ:')
        print(' expected: %s' % snapshots)
        print(' got: %s' % found)
400
401
# expected output of deltas._findsnapshots on the makesnapshot revlog:
# {base revision: [revisions that are snapshots on top of it]}
snapshotmapall = {0: [6, 8, 11, 17, 19, 25], 8: [21], -1: [0, 30]}
# same map when the search starts at revision 15 instead of 0
snapshotmap15 = {0: [17, 19, 25], 8: [21], -1: [30]}
def findsnapshottest(rlog):
    """Check deltas._findsnapshots output for start revisions 0 and 15.

    Mismatches against the expected snapshot maps are reported on stdout.
    """
    def check(startrev, expected):
        # _findsnapshots fills a mapping of base rev -> snapshot revs
        cache = collections.defaultdict(list)
        deltas._findsnapshots(rlog, cache, startrev)
        found = dict(cache.items())
        if found != expected:
            print('snapshot map differ:')
            print(' expected: %s' % expected)
            print(' got: %s' % found)

    check(0, snapshotmapall)
    check(15, snapshotmap15)
418
419
def maintest():
    """Run the full revlog test sequence inside a single transaction."""
    with newtransaction() as tr:
        base = newrevlog(recreate=True)
        expected = writecases(base, tr)
        checkrevlog(base, expected)
        print('local test passed')
        # copy the revlog through revlog.addgroup, with and without
        # optimal deltas
        grouped = addgroupcopy(base, tr)
        checkrevlog(grouped, expected)
        suboptimal = addgroupcopy(base, tr, optimaldelta=False)
        checkrevlog(suboptimal, expected)
        print('addgroupcopy test passed')
        # copy the revlog through revlog.clone
        cloned = newrevlog(name=b'_destrevlog3.i', recreate=True)
        base.clone(tr, cloned)
        checkrevlog(cloned, expected)
        print('clone test passed')
        # copy the revlog through the low-level revlog._addrevision
        lowlevel = lowlevelcopy(base, tr)
        checkrevlog(lowlevel, expected)
        print('lowlevelcopy test passed')
        slicingtest(base)
        print('slicing test passed')
        snaplog = makesnapshot(tr)
        issnapshottest(snaplog)
        print('issnapshot test passed')
        findsnapshottest(snaplog)
        print('findsnapshot test passed')
447
448
# Script entry point: run the test suite and turn any crash into an
# explicit abort message so the harness reports a clear failure.
try:
    maintest()
except Exception as ex:
    abort('crashed: %s' % ex)
General Comments 0
You need to be logged in to leave comments. Login now