stream-clone: fix a crash when a repo with an empty revlog is cloned
Arseniy Alekseyev
r51970:74c004a5 stable
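Why this crashed and what the fix does: the stream-clone code checks each revlog's header bytes to decide whether the revlog is inline, and `revlog.is_inline_index` previously passed those bytes straight to `INDEX_HEADER.unpack()`. A repository may legitimately contain an empty revlog whose index file is zero bytes long, so the unpack call raised `struct.error` and aborted the clone. The new early return treats an empty index as inline, the format empty revlogs start out in. A minimal sketch of both behaviors follows; the `INDEX_HEADER` definition and the single-bit inline check are simplifying assumptions for illustration (the real code dispatches through `FEATURES_BY_VERSION`, as the diff below shows):

    import struct

    # Assumption: the index header is one 4-byte big-endian word.
    INDEX_HEADER = struct.Struct(b">I")

    # Before the fix: a zero-length index file produced empty header
    # bytes, and struct refused to unpack them, crashing the clone.
    try:
        INDEX_HEADER.unpack(b'')
    except struct.error as exc:
        print(exc)  # unpack requires a buffer of 4 bytes

    def is_inline_index(header_bytes):
        # After the fix: an empty revlog has no header yet, so report
        # it as inline, the format new revlogs are created with.
        if len(header_bytes) == 0:
            return True
        header = INDEX_HEADER.unpack(header_bytes)[0]
        # Simplified: FLAG_INLINE_DATA is bit 16 of the header word.
        return bool(header & (1 << 16))

    print(is_inline_index(b''))  # True, instead of raising struct.error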
@@ -1,3530 +1,3533 @@
# revlog.py - storage back-end for mercurial
# coding: utf8
#
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Storage back-end for Mercurial.

This provides efficient delta storage with O(1) retrieve and append
and O(changes) merge between branches.
"""


import binascii
import collections
import contextlib
import io
import os
import struct
import weakref
import zlib

# import stuff from node for others to import from revlog
from .node import (
    bin,
    hex,
    nullrev,
    sha1nodeconstants,
    short,
    wdirrev,
)
from .i18n import _
from .pycompat import getattr
from .revlogutils.constants import (
    ALL_KINDS,
    CHANGELOGV2,
    COMP_MODE_DEFAULT,
    COMP_MODE_INLINE,
    COMP_MODE_PLAIN,
    DELTA_BASE_REUSE_NO,
    DELTA_BASE_REUSE_TRY,
    ENTRY_RANK,
    FEATURES_BY_VERSION,
    FLAG_GENERALDELTA,
    FLAG_INLINE_DATA,
    INDEX_HEADER,
    KIND_CHANGELOG,
    KIND_FILELOG,
    RANK_UNKNOWN,
    REVLOGV0,
    REVLOGV1,
    REVLOGV1_FLAGS,
    REVLOGV2,
    REVLOGV2_FLAGS,
    REVLOG_DEFAULT_FLAGS,
    REVLOG_DEFAULT_FORMAT,
    REVLOG_DEFAULT_VERSION,
    SUPPORTED_FLAGS,
)
from .revlogutils.flagutil import (
    REVIDX_DEFAULT_FLAGS,
    REVIDX_ELLIPSIS,
    REVIDX_EXTSTORED,
    REVIDX_FLAGS_ORDER,
    REVIDX_HASCOPIESINFO,
    REVIDX_ISCENSORED,
    REVIDX_RAWTEXT_CHANGING_FLAGS,
)
from .thirdparty import attr
from . import (
    ancestor,
    dagop,
    error,
    mdiff,
    policy,
    pycompat,
    revlogutils,
    templatefilters,
    util,
)
from .interfaces import (
    repository,
    util as interfaceutil,
)
from .revlogutils import (
    deltas as deltautil,
    docket as docketutil,
    flagutil,
    nodemap as nodemaputil,
    randomaccessfile,
    revlogv0,
    rewrite,
    sidedata as sidedatautil,
)
from .utils import (
    storageutil,
    stringutil,
)

# blanked usage of all the name to prevent pyflakes constraints
# We need these name available in the module for extensions.

REVLOGV0
REVLOGV1
REVLOGV2
CHANGELOGV2
FLAG_INLINE_DATA
FLAG_GENERALDELTA
REVLOG_DEFAULT_FLAGS
REVLOG_DEFAULT_FORMAT
REVLOG_DEFAULT_VERSION
REVLOGV1_FLAGS
REVLOGV2_FLAGS
REVIDX_ISCENSORED
REVIDX_ELLIPSIS
REVIDX_HASCOPIESINFO
REVIDX_EXTSTORED
REVIDX_DEFAULT_FLAGS
REVIDX_FLAGS_ORDER
REVIDX_RAWTEXT_CHANGING_FLAGS

parsers = policy.importmod('parsers')
rustancestor = policy.importrust('ancestor')
rustdagop = policy.importrust('dagop')
rustrevlog = policy.importrust('revlog')

# Aliased for performance.
_zlibdecompress = zlib.decompress

# max size of inline data embedded into a revlog
_maxinline = 131072

# Flag processors for REVIDX_ELLIPSIS.
def ellipsisreadprocessor(rl, text):
    return text, False


def ellipsiswriteprocessor(rl, text):
    return text, False


def ellipsisrawprocessor(rl, text):
    return False


ellipsisprocessor = (
    ellipsisreadprocessor,
    ellipsiswriteprocessor,
    ellipsisrawprocessor,
)


def _verify_revision(rl, skipflags, state, node):
    """Verify the integrity of the given revlog ``node`` while providing a hook
    point for extensions to influence the operation."""
    if skipflags:
        state[b'skipread'].add(node)
    else:
        # Side-effect: read content and verify hash.
        rl.revision(node)


# True if a fast implementation for persistent-nodemap is available
#
# We also consider we have a "fast" implementation in "pure" python because
# people using pure don't really have performance consideration (and a
# wheelbarrow of other slowness source)
HAS_FAST_PERSISTENT_NODEMAP = rustrevlog is not None or util.safehasattr(
    parsers, 'BaseIndexObject'
)


@interfaceutil.implementer(repository.irevisiondelta)
@attr.s(slots=True)
class revlogrevisiondelta:
    node = attr.ib()
    p1node = attr.ib()
    p2node = attr.ib()
    basenode = attr.ib()
    flags = attr.ib()
    baserevisionsize = attr.ib()
    revision = attr.ib()
    delta = attr.ib()
    sidedata = attr.ib()
    protocol_flags = attr.ib()
    linknode = attr.ib(default=None)


@interfaceutil.implementer(repository.iverifyproblem)
@attr.s(frozen=True)
class revlogproblem:
    warning = attr.ib(default=None)
    error = attr.ib(default=None)
    node = attr.ib(default=None)


def parse_index_v1(data, inline):
    # call the C implementation to parse the index data
    index, cache = parsers.parse_index2(data, inline)
    return index, cache


def parse_index_v2(data, inline):
    # call the C implementation to parse the index data
    index, cache = parsers.parse_index2(data, inline, format=REVLOGV2)
    return index, cache


def parse_index_cl_v2(data, inline):
    # call the C implementation to parse the index data
    index, cache = parsers.parse_index2(data, inline, format=CHANGELOGV2)
    return index, cache


if util.safehasattr(parsers, 'parse_index_devel_nodemap'):

    def parse_index_v1_nodemap(data, inline):
        index, cache = parsers.parse_index_devel_nodemap(data, inline)
        return index, cache


else:
    parse_index_v1_nodemap = None


def parse_index_v1_mixed(data, inline):
    index, cache = parse_index_v1(data, inline)
    return rustrevlog.MixedIndex(index), cache


# corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
# signed integer)
_maxentrysize = 0x7FFFFFFF

FILE_TOO_SHORT_MSG = _(
    b'cannot read from revlog %s;'
    b' expected %d bytes from offset %d, data size is %d'
)

hexdigits = b'0123456789abcdefABCDEF'


class revlog:
    """
    the underlying revision storage object

    A revlog consists of two parts, an index and the revision data.

    The index is a file with a fixed record size containing
    information on each revision, including its nodeid (hash), the
    nodeids of its parents, the position and offset of its data within
    the data file, and the revision it's based on. Finally, each entry
    contains a linkrev entry that can serve as a pointer to external
    data.

    The revision data itself is a linear collection of data chunks.
    Each chunk represents a revision and is usually represented as a
    delta against the previous chunk. To bound lookup time, runs of
    deltas are limited to about 2 times the length of the original
    version data. This makes retrieval of a version proportional to
    its size, or O(1) relative to the number of revisions.

    Both pieces of the revlog are written to in an append-only
    fashion, which means we never need to rewrite a file to insert or
    remove data, and can use some simple techniques to avoid the need
    for locking while reading.

    If checkambig, indexfile is opened with checkambig=True at
    writing, to avoid file stat ambiguity.

    If mmaplargeindex is True, and an mmapindexthreshold is set, the
    index will be mmapped rather than read if it is larger than the
    configured threshold.

    If censorable is True, the revlog can have censored revisions.

    If `upperboundcomp` is not None, this is the expected maximal gain from
    compression for the data content.

    `concurrencychecker` is an optional function that receives 3 arguments: a
    file handle, a filename, and an expected position. It should check whether
    the current position in the file handle is valid, and log/warn/fail (by
    raising).

    See mercurial/revlogutils/contants.py for details about the content of an
    index entry.
    """

    _flagserrorclass = error.RevlogError

    @staticmethod
    def is_inline_index(header_bytes):
+        if len(header_bytes) == 0:
+            return True
+
295 header = INDEX_HEADER.unpack(header_bytes)[0]
298 header = INDEX_HEADER.unpack(header_bytes)[0]
296
299
297 _format_flags = header & ~0xFFFF
300 _format_flags = header & ~0xFFFF
298 _format_version = header & 0xFFFF
301 _format_version = header & 0xFFFF
299
302
300 features = FEATURES_BY_VERSION[_format_version]
303 features = FEATURES_BY_VERSION[_format_version]
301 return features[b'inline'](_format_flags)
304 return features[b'inline'](_format_flags)
302
305
303 def __init__(
306 def __init__(
304 self,
307 self,
305 opener,
308 opener,
306 target,
309 target,
307 radix,
310 radix,
308 postfix=None, # only exist for `tmpcensored` now
311 postfix=None, # only exist for `tmpcensored` now
309 checkambig=False,
312 checkambig=False,
310 mmaplargeindex=False,
313 mmaplargeindex=False,
311 censorable=False,
314 censorable=False,
312 upperboundcomp=None,
315 upperboundcomp=None,
313 persistentnodemap=False,
316 persistentnodemap=False,
314 concurrencychecker=None,
317 concurrencychecker=None,
315 trypending=False,
318 trypending=False,
316 try_split=False,
319 try_split=False,
317 canonical_parent_order=True,
320 canonical_parent_order=True,
318 ):
321 ):
319 """
322 """
320 create a revlog object
323 create a revlog object
321
324
322 opener is a function that abstracts the file opening operation
325 opener is a function that abstracts the file opening operation
323 and can be used to implement COW semantics or the like.
326 and can be used to implement COW semantics or the like.
324
327
325 `target`: a (KIND, ID) tuple that identify the content stored in
328 `target`: a (KIND, ID) tuple that identify the content stored in
326 this revlog. It help the rest of the code to understand what the revlog
329 this revlog. It help the rest of the code to understand what the revlog
327 is about without having to resort to heuristic and index filename
330 is about without having to resort to heuristic and index filename
328 analysis. Note: that this must be reliably be set by normal code, but
331 analysis. Note: that this must be reliably be set by normal code, but
329 that test, debug, or performance measurement code might not set this to
332 that test, debug, or performance measurement code might not set this to
330 accurate value.
333 accurate value.
331 """
334 """
332 self.upperboundcomp = upperboundcomp
335 self.upperboundcomp = upperboundcomp
333
336
334 self.radix = radix
337 self.radix = radix
335
338
336 self._docket_file = None
339 self._docket_file = None
337 self._indexfile = None
340 self._indexfile = None
338 self._datafile = None
341 self._datafile = None
339 self._sidedatafile = None
342 self._sidedatafile = None
340 self._nodemap_file = None
343 self._nodemap_file = None
341 self.postfix = postfix
344 self.postfix = postfix
342 self._trypending = trypending
345 self._trypending = trypending
343 self._try_split = try_split
346 self._try_split = try_split
344 self.opener = opener
347 self.opener = opener
345 if persistentnodemap:
348 if persistentnodemap:
346 self._nodemap_file = nodemaputil.get_nodemap_file(self)
349 self._nodemap_file = nodemaputil.get_nodemap_file(self)
347
350
348 assert target[0] in ALL_KINDS
351 assert target[0] in ALL_KINDS
349 assert len(target) == 2
352 assert len(target) == 2
350 self.target = target
353 self.target = target
351 # When True, indexfile is opened with checkambig=True at writing, to
354 # When True, indexfile is opened with checkambig=True at writing, to
352 # avoid file stat ambiguity.
355 # avoid file stat ambiguity.
353 self._checkambig = checkambig
356 self._checkambig = checkambig
354 self._mmaplargeindex = mmaplargeindex
357 self._mmaplargeindex = mmaplargeindex
355 self._censorable = censorable
358 self._censorable = censorable
356 # 3-tuple of (node, rev, text) for a raw revision.
359 # 3-tuple of (node, rev, text) for a raw revision.
357 self._revisioncache = None
360 self._revisioncache = None
358 # Maps rev to chain base rev.
361 # Maps rev to chain base rev.
359 self._chainbasecache = util.lrucachedict(100)
362 self._chainbasecache = util.lrucachedict(100)
360 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
363 # 2-tuple of (offset, data) of raw data from the revlog at an offset.
361 self._chunkcache = (0, b'')
364 self._chunkcache = (0, b'')
362 # How much data to read and cache into the raw revlog data cache.
365 # How much data to read and cache into the raw revlog data cache.
363 self._chunkcachesize = 65536
366 self._chunkcachesize = 65536
364 self._maxchainlen = None
367 self._maxchainlen = None
365 self._deltabothparents = True
368 self._deltabothparents = True
366 self._candidate_group_chunk_size = 0
369 self._candidate_group_chunk_size = 0
367 self._debug_delta = False
370 self._debug_delta = False
368 self.index = None
371 self.index = None
369 self._docket = None
372 self._docket = None
370 self._nodemap_docket = None
373 self._nodemap_docket = None
371 # Mapping of partial identifiers to full nodes.
374 # Mapping of partial identifiers to full nodes.
372 self._pcache = {}
375 self._pcache = {}
373 # Mapping of revision integer to full node.
376 # Mapping of revision integer to full node.
374 self._compengine = b'zlib'
377 self._compengine = b'zlib'
375 self._compengineopts = {}
378 self._compengineopts = {}
376 self._maxdeltachainspan = -1
379 self._maxdeltachainspan = -1
377 self._withsparseread = False
380 self._withsparseread = False
378 self._sparserevlog = False
381 self._sparserevlog = False
379 self.hassidedata = False
382 self.hassidedata = False
380 self._srdensitythreshold = 0.50
383 self._srdensitythreshold = 0.50
381 self._srmingapsize = 262144
384 self._srmingapsize = 262144
382
385
383 # other optionnals features
386 # other optionnals features
384
387
385 # might remove rank configuration once the computation has no impact
388 # might remove rank configuration once the computation has no impact
386 self._compute_rank = False
389 self._compute_rank = False
387
390
388 # Make copy of flag processors so each revlog instance can support
391 # Make copy of flag processors so each revlog instance can support
389 # custom flags.
392 # custom flags.
390 self._flagprocessors = dict(flagutil.flagprocessors)
393 self._flagprocessors = dict(flagutil.flagprocessors)
391
394
392 # 3-tuple of file handles being used for active writing.
395 # 3-tuple of file handles being used for active writing.
393 self._writinghandles = None
396 self._writinghandles = None
394 # prevent nesting of addgroup
397 # prevent nesting of addgroup
395 self._adding_group = None
398 self._adding_group = None
396
399
397 self._loadindex()
400 self._loadindex()
398
401
399 self._concurrencychecker = concurrencychecker
402 self._concurrencychecker = concurrencychecker
400
403
401 # parent order is supposed to be semantically irrelevant, so we
404 # parent order is supposed to be semantically irrelevant, so we
402 # normally resort parents to ensure that the first parent is non-null,
405 # normally resort parents to ensure that the first parent is non-null,
403 # if there is a non-null parent at all.
406 # if there is a non-null parent at all.
404 # filelog abuses the parent order as flag to mark some instances of
407 # filelog abuses the parent order as flag to mark some instances of
405 # meta-encoded files, so allow it to disable this behavior.
408 # meta-encoded files, so allow it to disable this behavior.
406 self.canonical_parent_order = canonical_parent_order
409 self.canonical_parent_order = canonical_parent_order
407
410
408 def _init_opts(self):
411 def _init_opts(self):
409 """process options (from above/config) to setup associated default revlog mode
412 """process options (from above/config) to setup associated default revlog mode
410
413
411 These values might be affected when actually reading on disk information.
414 These values might be affected when actually reading on disk information.
412
415
413 The relevant values are returned for use in _loadindex().
416 The relevant values are returned for use in _loadindex().
414
417
415 * newversionflags:
418 * newversionflags:
416 version header to use if we need to create a new revlog
419 version header to use if we need to create a new revlog
417
420
418 * mmapindexthreshold:
421 * mmapindexthreshold:
419 minimal index size for start to use mmap
422 minimal index size for start to use mmap
420
423
421 * force_nodemap:
424 * force_nodemap:
422 force the usage of a "development" version of the nodemap code
425 force the usage of a "development" version of the nodemap code
423 """
426 """
424 mmapindexthreshold = None
427 mmapindexthreshold = None
425 opts = self.opener.options
428 opts = self.opener.options
426
429
427 if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
430 if b'changelogv2' in opts and self.revlog_kind == KIND_CHANGELOG:
428 new_header = CHANGELOGV2
431 new_header = CHANGELOGV2
429 self._compute_rank = opts.get(b'changelogv2.compute-rank', True)
432 self._compute_rank = opts.get(b'changelogv2.compute-rank', True)
430 elif b'revlogv2' in opts:
433 elif b'revlogv2' in opts:
431 new_header = REVLOGV2
434 new_header = REVLOGV2
432 elif b'revlogv1' in opts:
435 elif b'revlogv1' in opts:
433 new_header = REVLOGV1 | FLAG_INLINE_DATA
436 new_header = REVLOGV1 | FLAG_INLINE_DATA
434 if b'generaldelta' in opts:
437 if b'generaldelta' in opts:
435 new_header |= FLAG_GENERALDELTA
438 new_header |= FLAG_GENERALDELTA
436 elif b'revlogv0' in self.opener.options:
439 elif b'revlogv0' in self.opener.options:
437 new_header = REVLOGV0
440 new_header = REVLOGV0
438 else:
441 else:
439 new_header = REVLOG_DEFAULT_VERSION
442 new_header = REVLOG_DEFAULT_VERSION
440
443
441 if b'chunkcachesize' in opts:
444 if b'chunkcachesize' in opts:
442 self._chunkcachesize = opts[b'chunkcachesize']
445 self._chunkcachesize = opts[b'chunkcachesize']
443 if b'maxchainlen' in opts:
446 if b'maxchainlen' in opts:
444 self._maxchainlen = opts[b'maxchainlen']
447 self._maxchainlen = opts[b'maxchainlen']
445 if b'deltabothparents' in opts:
448 if b'deltabothparents' in opts:
446 self._deltabothparents = opts[b'deltabothparents']
449 self._deltabothparents = opts[b'deltabothparents']
447 dps_cgds = opts.get(b'delta-parent-search.candidate-group-chunk-size')
450 dps_cgds = opts.get(b'delta-parent-search.candidate-group-chunk-size')
448 if dps_cgds:
451 if dps_cgds:
449 self._candidate_group_chunk_size = dps_cgds
452 self._candidate_group_chunk_size = dps_cgds
450 self._lazydelta = bool(opts.get(b'lazydelta', True))
453 self._lazydelta = bool(opts.get(b'lazydelta', True))
451 self._lazydeltabase = False
454 self._lazydeltabase = False
452 if self._lazydelta:
455 if self._lazydelta:
453 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
456 self._lazydeltabase = bool(opts.get(b'lazydeltabase', False))
454 if b'debug-delta' in opts:
457 if b'debug-delta' in opts:
455 self._debug_delta = opts[b'debug-delta']
458 self._debug_delta = opts[b'debug-delta']
456 if b'compengine' in opts:
459 if b'compengine' in opts:
457 self._compengine = opts[b'compengine']
460 self._compengine = opts[b'compengine']
458 if b'zlib.level' in opts:
461 if b'zlib.level' in opts:
459 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
462 self._compengineopts[b'zlib.level'] = opts[b'zlib.level']
460 if b'zstd.level' in opts:
463 if b'zstd.level' in opts:
461 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
464 self._compengineopts[b'zstd.level'] = opts[b'zstd.level']
462 if b'maxdeltachainspan' in opts:
465 if b'maxdeltachainspan' in opts:
463 self._maxdeltachainspan = opts[b'maxdeltachainspan']
466 self._maxdeltachainspan = opts[b'maxdeltachainspan']
464 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
467 if self._mmaplargeindex and b'mmapindexthreshold' in opts:
465 mmapindexthreshold = opts[b'mmapindexthreshold']
468 mmapindexthreshold = opts[b'mmapindexthreshold']
466 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
469 self._sparserevlog = bool(opts.get(b'sparse-revlog', False))
467 withsparseread = bool(opts.get(b'with-sparse-read', False))
470 withsparseread = bool(opts.get(b'with-sparse-read', False))
468 # sparse-revlog forces sparse-read
471 # sparse-revlog forces sparse-read
469 self._withsparseread = self._sparserevlog or withsparseread
472 self._withsparseread = self._sparserevlog or withsparseread
470 if b'sparse-read-density-threshold' in opts:
473 if b'sparse-read-density-threshold' in opts:
471 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
474 self._srdensitythreshold = opts[b'sparse-read-density-threshold']
472 if b'sparse-read-min-gap-size' in opts:
475 if b'sparse-read-min-gap-size' in opts:
473 self._srmingapsize = opts[b'sparse-read-min-gap-size']
476 self._srmingapsize = opts[b'sparse-read-min-gap-size']
474 if opts.get(b'enableellipsis'):
477 if opts.get(b'enableellipsis'):
475 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
478 self._flagprocessors[REVIDX_ELLIPSIS] = ellipsisprocessor
476
479
477 # revlog v0 doesn't have flag processors
480 # revlog v0 doesn't have flag processors
478 for flag, processor in opts.get(b'flagprocessors', {}).items():
481 for flag, processor in opts.get(b'flagprocessors', {}).items():
479 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
482 flagutil.insertflagprocessor(flag, processor, self._flagprocessors)
480
483
481 if self._chunkcachesize <= 0:
484 if self._chunkcachesize <= 0:
482 raise error.RevlogError(
485 raise error.RevlogError(
483 _(b'revlog chunk cache size %r is not greater than 0')
486 _(b'revlog chunk cache size %r is not greater than 0')
484 % self._chunkcachesize
487 % self._chunkcachesize
485 )
488 )
486 elif self._chunkcachesize & (self._chunkcachesize - 1):
489 elif self._chunkcachesize & (self._chunkcachesize - 1):
487 raise error.RevlogError(
490 raise error.RevlogError(
488 _(b'revlog chunk cache size %r is not a power of 2')
491 _(b'revlog chunk cache size %r is not a power of 2')
489 % self._chunkcachesize
492 % self._chunkcachesize
490 )
493 )
491 force_nodemap = opts.get(b'devel-force-nodemap', False)
494 force_nodemap = opts.get(b'devel-force-nodemap', False)
492 return new_header, mmapindexthreshold, force_nodemap
495 return new_header, mmapindexthreshold, force_nodemap
493
496
494 def _get_data(self, filepath, mmap_threshold, size=None):
497 def _get_data(self, filepath, mmap_threshold, size=None):
495 """return a file content with or without mmap
498 """return a file content with or without mmap
496
499
497 If the file is missing return the empty string"""
500 If the file is missing return the empty string"""
498 try:
501 try:
499 with self.opener(filepath) as fp:
502 with self.opener(filepath) as fp:
500 if mmap_threshold is not None:
503 if mmap_threshold is not None:
501 file_size = self.opener.fstat(fp).st_size
504 file_size = self.opener.fstat(fp).st_size
502 if file_size >= mmap_threshold:
505 if file_size >= mmap_threshold:
503 if size is not None:
506 if size is not None:
504 # avoid potentiel mmap crash
507 # avoid potentiel mmap crash
505 size = min(file_size, size)
508 size = min(file_size, size)
506 # TODO: should .close() to release resources without
509 # TODO: should .close() to release resources without
507 # relying on Python GC
510 # relying on Python GC
508 if size is None:
511 if size is None:
509 return util.buffer(util.mmapread(fp))
512 return util.buffer(util.mmapread(fp))
510 else:
513 else:
511 return util.buffer(util.mmapread(fp, size))
514 return util.buffer(util.mmapread(fp, size))
512 if size is None:
515 if size is None:
513 return fp.read()
516 return fp.read()
514 else:
517 else:
515 return fp.read(size)
518 return fp.read(size)
516 except FileNotFoundError:
519 except FileNotFoundError:
517 return b''
520 return b''
518
521
519 def get_streams(self, max_linkrev, force_inline=False):
522 def get_streams(self, max_linkrev, force_inline=False):
520 n = len(self)
523 n = len(self)
521 index = self.index
524 index = self.index
522 while n > 0:
525 while n > 0:
523 linkrev = index[n - 1][4]
526 linkrev = index[n - 1][4]
524 if linkrev < max_linkrev:
527 if linkrev < max_linkrev:
525 break
528 break
526 # note: this loop will rarely go through multiple iterations, since
529 # note: this loop will rarely go through multiple iterations, since
527 # it only traverses commits created during the current streaming
530 # it only traverses commits created during the current streaming
528 # pull operation.
531 # pull operation.
529 #
532 #
530 # If this become a problem, using a binary search should cap the
533 # If this become a problem, using a binary search should cap the
531 # runtime of this.
534 # runtime of this.
532 n = n - 1
535 n = n - 1
533 if n == 0:
536 if n == 0:
534 # no data to send
537 # no data to send
535 return []
538 return []
536 index_size = n * index.entry_size
539 index_size = n * index.entry_size
537 data_size = self.end(n - 1)
540 data_size = self.end(n - 1)
538
541
539 # XXX we might have been split (or stripped) since the object
542 # XXX we might have been split (or stripped) since the object
540 # initialization, We need to close this race too, but having a way to
543 # initialization, We need to close this race too, but having a way to
541 # pre-open the file we feed to the revlog and never closing them before
544 # pre-open the file we feed to the revlog and never closing them before
542 # we are done streaming.
545 # we are done streaming.
543
546
544 if self._inline:
547 if self._inline:
545
548
546 def get_stream():
549 def get_stream():
547 with self._indexfp() as fp:
550 with self._indexfp() as fp:
548 yield None
551 yield None
549 size = index_size + data_size
552 size = index_size + data_size
550 if size <= 65536:
553 if size <= 65536:
551 yield fp.read(size)
554 yield fp.read(size)
552 else:
555 else:
553 yield from util.filechunkiter(fp, limit=size)
556 yield from util.filechunkiter(fp, limit=size)
554
557
555 inline_stream = get_stream()
558 inline_stream = get_stream()
556 next(inline_stream)
559 next(inline_stream)
557 return [
560 return [
558 (self._indexfile, inline_stream, index_size + data_size),
561 (self._indexfile, inline_stream, index_size + data_size),
559 ]
562 ]
560 elif force_inline:
563 elif force_inline:
561
564
562 def get_stream():
565 def get_stream():
563 with self._datafp() as fp_d:
566 with self._datafp() as fp_d:
564 yield None
567 yield None
565
568
566 for rev in range(n):
569 for rev in range(n):
567 idx = self.index.entry_binary(rev)
570 idx = self.index.entry_binary(rev)
568 if rev == 0 and self._docket is None:
571 if rev == 0 and self._docket is None:
569 # re-inject the inline flag
572 # re-inject the inline flag
570 header = self._format_flags
573 header = self._format_flags
571 header |= self._format_version
574 header |= self._format_version
572 header |= FLAG_INLINE_DATA
575 header |= FLAG_INLINE_DATA
573 header = self.index.pack_header(header)
576 header = self.index.pack_header(header)
574 idx = header + idx
577 idx = header + idx
575 yield idx
578 yield idx
576 yield self._getsegmentforrevs(rev, rev, df=fp_d)[1]
579 yield self._getsegmentforrevs(rev, rev, df=fp_d)[1]
577
580
578 inline_stream = get_stream()
581 inline_stream = get_stream()
579 next(inline_stream)
582 next(inline_stream)
580 return [
583 return [
581 (self._indexfile, inline_stream, index_size + data_size),
584 (self._indexfile, inline_stream, index_size + data_size),
582 ]
585 ]
583 else:
586 else:
584
587
585 def get_index_stream():
588 def get_index_stream():
586 with self._indexfp() as fp:
589 with self._indexfp() as fp:
587 yield None
590 yield None
588 if index_size <= 65536:
591 if index_size <= 65536:
589 yield fp.read(index_size)
592 yield fp.read(index_size)
590 else:
593 else:
591 yield from util.filechunkiter(fp, limit=index_size)
594 yield from util.filechunkiter(fp, limit=index_size)
592
595
593 def get_data_stream():
596 def get_data_stream():
594 with self._datafp() as fp:
597 with self._datafp() as fp:
595 yield None
598 yield None
596 if data_size <= 65536:
599 if data_size <= 65536:
597 yield fp.read(data_size)
600 yield fp.read(data_size)
598 else:
601 else:
599 yield from util.filechunkiter(fp, limit=data_size)
602 yield from util.filechunkiter(fp, limit=data_size)
600
603
601 index_stream = get_index_stream()
604 index_stream = get_index_stream()
602 next(index_stream)
605 next(index_stream)
603 data_stream = get_data_stream()
606 data_stream = get_data_stream()
604 next(data_stream)
607 next(data_stream)
605 return [
608 return [
606 (self._datafile, data_stream, data_size),
609 (self._datafile, data_stream, data_size),
607 (self._indexfile, index_stream, index_size),
610 (self._indexfile, index_stream, index_size),
608 ]
611 ]
609
612
610 def _loadindex(self, docket=None):
613 def _loadindex(self, docket=None):
611
614
612 new_header, mmapindexthreshold, force_nodemap = self._init_opts()
615 new_header, mmapindexthreshold, force_nodemap = self._init_opts()
613
616
614 if self.postfix is not None:
617 if self.postfix is not None:
615 entry_point = b'%s.i.%s' % (self.radix, self.postfix)
618 entry_point = b'%s.i.%s' % (self.radix, self.postfix)
616 elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
619 elif self._trypending and self.opener.exists(b'%s.i.a' % self.radix):
617 entry_point = b'%s.i.a' % self.radix
620 entry_point = b'%s.i.a' % self.radix
618 elif self._try_split and self.opener.exists(self._split_index_file):
621 elif self._try_split and self.opener.exists(self._split_index_file):
619 entry_point = self._split_index_file
622 entry_point = self._split_index_file
620 else:
623 else:
621 entry_point = b'%s.i' % self.radix
624 entry_point = b'%s.i' % self.radix
622
625
623 if docket is not None:
626 if docket is not None:
624 self._docket = docket
627 self._docket = docket
625 self._docket_file = entry_point
628 self._docket_file = entry_point
626 else:
629 else:
627 self._initempty = True
630 self._initempty = True
628 entry_data = self._get_data(entry_point, mmapindexthreshold)
631 entry_data = self._get_data(entry_point, mmapindexthreshold)
629 if len(entry_data) > 0:
632 if len(entry_data) > 0:
630 header = INDEX_HEADER.unpack(entry_data[:4])[0]
633 header = INDEX_HEADER.unpack(entry_data[:4])[0]
631 self._initempty = False
634 self._initempty = False
632 else:
635 else:
633 header = new_header
636 header = new_header
634
637
635 self._format_flags = header & ~0xFFFF
638 self._format_flags = header & ~0xFFFF
636 self._format_version = header & 0xFFFF
639 self._format_version = header & 0xFFFF
637
640
638 supported_flags = SUPPORTED_FLAGS.get(self._format_version)
641 supported_flags = SUPPORTED_FLAGS.get(self._format_version)
639 if supported_flags is None:
642 if supported_flags is None:
640 msg = _(b'unknown version (%d) in revlog %s')
643 msg = _(b'unknown version (%d) in revlog %s')
641 msg %= (self._format_version, self.display_id)
644 msg %= (self._format_version, self.display_id)
642 raise error.RevlogError(msg)
645 raise error.RevlogError(msg)
643 elif self._format_flags & ~supported_flags:
646 elif self._format_flags & ~supported_flags:
644 msg = _(b'unknown flags (%#04x) in version %d revlog %s')
647 msg = _(b'unknown flags (%#04x) in version %d revlog %s')
645 display_flag = self._format_flags >> 16
648 display_flag = self._format_flags >> 16
646 msg %= (display_flag, self._format_version, self.display_id)
649 msg %= (display_flag, self._format_version, self.display_id)
647 raise error.RevlogError(msg)
650 raise error.RevlogError(msg)
648
651
649 features = FEATURES_BY_VERSION[self._format_version]
652 features = FEATURES_BY_VERSION[self._format_version]
650 self._inline = features[b'inline'](self._format_flags)
653 self._inline = features[b'inline'](self._format_flags)
651 self._generaldelta = features[b'generaldelta'](self._format_flags)
654 self._generaldelta = features[b'generaldelta'](self._format_flags)
652 self.hassidedata = features[b'sidedata']
655 self.hassidedata = features[b'sidedata']
653
656
654 if not features[b'docket']:
657 if not features[b'docket']:
655 self._indexfile = entry_point
658 self._indexfile = entry_point
656 index_data = entry_data
659 index_data = entry_data
657 else:
660 else:
658 self._docket_file = entry_point
661 self._docket_file = entry_point
659 if self._initempty:
662 if self._initempty:
660 self._docket = docketutil.default_docket(self, header)
663 self._docket = docketutil.default_docket(self, header)
661 else:
664 else:
662 self._docket = docketutil.parse_docket(
665 self._docket = docketutil.parse_docket(
663 self, entry_data, use_pending=self._trypending
666 self, entry_data, use_pending=self._trypending
664 )
667 )
665
668
666 if self._docket is not None:
669 if self._docket is not None:
667 self._indexfile = self._docket.index_filepath()
670 self._indexfile = self._docket.index_filepath()
668 index_data = b''
671 index_data = b''
669 index_size = self._docket.index_end
672 index_size = self._docket.index_end
670 if index_size > 0:
673 if index_size > 0:
671 index_data = self._get_data(
674 index_data = self._get_data(
672 self._indexfile, mmapindexthreshold, size=index_size
675 self._indexfile, mmapindexthreshold, size=index_size
673 )
676 )
674 if len(index_data) < index_size:
677 if len(index_data) < index_size:
675 msg = _(b'too few index data for %s: got %d, expected %d')
678 msg = _(b'too few index data for %s: got %d, expected %d')
676 msg %= (self.display_id, len(index_data), index_size)
679 msg %= (self.display_id, len(index_data), index_size)
677 raise error.RevlogError(msg)
680 raise error.RevlogError(msg)
678
681
679 self._inline = False
682 self._inline = False
680 # generaldelta implied by version 2 revlogs.
683 # generaldelta implied by version 2 revlogs.
681 self._generaldelta = True
684 self._generaldelta = True
682 # the logic for persistent nodemap will be dealt with within the
685 # the logic for persistent nodemap will be dealt with within the
683 # main docket, so disable it for now.
686 # main docket, so disable it for now.
684 self._nodemap_file = None
687 self._nodemap_file = None
685
688
686 if self._docket is not None:
689 if self._docket is not None:
687 self._datafile = self._docket.data_filepath()
690 self._datafile = self._docket.data_filepath()
688 self._sidedatafile = self._docket.sidedata_filepath()
691 self._sidedatafile = self._docket.sidedata_filepath()
689 elif self.postfix is None:
692 elif self.postfix is None:
690 self._datafile = b'%s.d' % self.radix
693 self._datafile = b'%s.d' % self.radix
691 else:
694 else:
692 self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
695 self._datafile = b'%s.d.%s' % (self.radix, self.postfix)
693
696
694 self.nodeconstants = sha1nodeconstants
697 self.nodeconstants = sha1nodeconstants
695 self.nullid = self.nodeconstants.nullid
698 self.nullid = self.nodeconstants.nullid
696
699
697 # sparse-revlog can't be on without general-delta (issue6056)
700 # sparse-revlog can't be on without general-delta (issue6056)
698 if not self._generaldelta:
701 if not self._generaldelta:
699 self._sparserevlog = False
702 self._sparserevlog = False
700
703
701 self._storedeltachains = True
704 self._storedeltachains = True
702
705
703 devel_nodemap = (
706 devel_nodemap = (
704 self._nodemap_file
707 self._nodemap_file
705 and force_nodemap
708 and force_nodemap
706 and parse_index_v1_nodemap is not None
709 and parse_index_v1_nodemap is not None
707 )
710 )
708
711
709 use_rust_index = False
712 use_rust_index = False
710 if rustrevlog is not None:
713 if rustrevlog is not None:
711 if self._nodemap_file is not None:
714 if self._nodemap_file is not None:
712 use_rust_index = True
715 use_rust_index = True
713 else:
716 else:
714 use_rust_index = self.opener.options.get(b'rust.index')
717 use_rust_index = self.opener.options.get(b'rust.index')
715
718
716 self._parse_index = parse_index_v1
719 self._parse_index = parse_index_v1
717 if self._format_version == REVLOGV0:
720 if self._format_version == REVLOGV0:
718 self._parse_index = revlogv0.parse_index_v0
721 self._parse_index = revlogv0.parse_index_v0
719 elif self._format_version == REVLOGV2:
722 elif self._format_version == REVLOGV2:
720 self._parse_index = parse_index_v2
723 self._parse_index = parse_index_v2
721 elif self._format_version == CHANGELOGV2:
724 elif self._format_version == CHANGELOGV2:
722 self._parse_index = parse_index_cl_v2
725 self._parse_index = parse_index_cl_v2
723 elif devel_nodemap:
726 elif devel_nodemap:
724 self._parse_index = parse_index_v1_nodemap
727 self._parse_index = parse_index_v1_nodemap
725 elif use_rust_index:
728 elif use_rust_index:
726 self._parse_index = parse_index_v1_mixed
729 self._parse_index = parse_index_v1_mixed
727 try:
730 try:
728 d = self._parse_index(index_data, self._inline)
731 d = self._parse_index(index_data, self._inline)
729 index, chunkcache = d
732 index, chunkcache = d
730 use_nodemap = (
733 use_nodemap = (
731 not self._inline
734 not self._inline
732 and self._nodemap_file is not None
735 and self._nodemap_file is not None
733 and util.safehasattr(index, 'update_nodemap_data')
736 and util.safehasattr(index, 'update_nodemap_data')
734 )
737 )
735 if use_nodemap:
738 if use_nodemap:
736 nodemap_data = nodemaputil.persisted_data(self)
739 nodemap_data = nodemaputil.persisted_data(self)
737 if nodemap_data is not None:
740 if nodemap_data is not None:
738 docket = nodemap_data[0]
741 docket = nodemap_data[0]
739 if (
742 if (
740 len(d[0]) > docket.tip_rev
743 len(d[0]) > docket.tip_rev
741 and d[0][docket.tip_rev][7] == docket.tip_node
744 and d[0][docket.tip_rev][7] == docket.tip_node
742 ):
745 ):
743 # no changelog tampering
746 # no changelog tampering
744 self._nodemap_docket = docket
747 self._nodemap_docket = docket
745 index.update_nodemap_data(*nodemap_data)
748 index.update_nodemap_data(*nodemap_data)
746 except (ValueError, IndexError):
749 except (ValueError, IndexError):
747 raise error.RevlogError(
750 raise error.RevlogError(
748 _(b"index %s is corrupted") % self.display_id
751 _(b"index %s is corrupted") % self.display_id
749 )
752 )
750 self.index = index
753 self.index = index
751 self._segmentfile = randomaccessfile.randomaccessfile(
754 self._segmentfile = randomaccessfile.randomaccessfile(
752 self.opener,
755 self.opener,
753 (self._indexfile if self._inline else self._datafile),
756 (self._indexfile if self._inline else self._datafile),
754 self._chunkcachesize,
757 self._chunkcachesize,
755 chunkcache,
758 chunkcache,
756 )
759 )
757 self._segmentfile_sidedata = randomaccessfile.randomaccessfile(
760 self._segmentfile_sidedata = randomaccessfile.randomaccessfile(
758 self.opener,
761 self.opener,
759 self._sidedatafile,
762 self._sidedatafile,
760 self._chunkcachesize,
763 self._chunkcachesize,
761 )
764 )
762 # revnum -> (chain-length, sum-delta-length)
765 # revnum -> (chain-length, sum-delta-length)
763 self._chaininfocache = util.lrucachedict(500)
766 self._chaininfocache = util.lrucachedict(500)
764 # revlog header -> revlog compressor
767 # revlog header -> revlog compressor
765 self._decompressors = {}
768 self._decompressors = {}
766
769
767 def get_revlog(self):
770 def get_revlog(self):
768 """simple function to mirror API of other not-really-revlog API"""
771 """simple function to mirror API of other not-really-revlog API"""
769 return self
772 return self
770
773
771 @util.propertycache
774 @util.propertycache
772 def revlog_kind(self):
775 def revlog_kind(self):
773 return self.target[0]
776 return self.target[0]
774
777
775 @util.propertycache
778 @util.propertycache
776 def display_id(self):
779 def display_id(self):
777 """The public facing "ID" of the revlog that we use in message"""
780 """The public facing "ID" of the revlog that we use in message"""
778 if self.revlog_kind == KIND_FILELOG:
781 if self.revlog_kind == KIND_FILELOG:
779 # Reference the file without the "data/" prefix, so it is familiar
782 # Reference the file without the "data/" prefix, so it is familiar
780 # to the user.
783 # to the user.
781 return self.target[1]
784 return self.target[1]
782 else:
785 else:
783 return self.radix
786 return self.radix
784
787
785 def _get_decompressor(self, t):
788 def _get_decompressor(self, t):
786 try:
789 try:
787 compressor = self._decompressors[t]
790 compressor = self._decompressors[t]
788 except KeyError:
791 except KeyError:
789 try:
792 try:
790 engine = util.compengines.forrevlogheader(t)
793 engine = util.compengines.forrevlogheader(t)
791 compressor = engine.revlogcompressor(self._compengineopts)
794 compressor = engine.revlogcompressor(self._compengineopts)
792 self._decompressors[t] = compressor
795 self._decompressors[t] = compressor
793 except KeyError:
796 except KeyError:
794 raise error.RevlogError(
797 raise error.RevlogError(
795 _(b'unknown compression type %s') % binascii.hexlify(t)
798 _(b'unknown compression type %s') % binascii.hexlify(t)
796 )
799 )
797 return compressor
800 return compressor
798
801
799 @util.propertycache
802 @util.propertycache
800 def _compressor(self):
803 def _compressor(self):
801 engine = util.compengines[self._compengine]
804 engine = util.compengines[self._compengine]
802 return engine.revlogcompressor(self._compengineopts)
805 return engine.revlogcompressor(self._compengineopts)
803
806
804 @util.propertycache
807 @util.propertycache
805 def _decompressor(self):
808 def _decompressor(self):
806 """the default decompressor"""
809 """the default decompressor"""
807 if self._docket is None:
810 if self._docket is None:
808 return None
811 return None
809 t = self._docket.default_compression_header
812 t = self._docket.default_compression_header
810 c = self._get_decompressor(t)
813 c = self._get_decompressor(t)
811 return c.decompress
814 return c.decompress
812
815
813 def _indexfp(self):
816 def _indexfp(self):
814 """file object for the revlog's index file"""
817 """file object for the revlog's index file"""
815 return self.opener(self._indexfile, mode=b"r")
818 return self.opener(self._indexfile, mode=b"r")
816
819
817 def __index_write_fp(self):
820 def __index_write_fp(self):
818 # You should not use this directly and use `_writing` instead
821 # You should not use this directly and use `_writing` instead
819 try:
822 try:
820 f = self.opener(
823 f = self.opener(
821 self._indexfile, mode=b"r+", checkambig=self._checkambig
824 self._indexfile, mode=b"r+", checkambig=self._checkambig
822 )
825 )
823 if self._docket is None:
826 if self._docket is None:
824 f.seek(0, os.SEEK_END)
827 f.seek(0, os.SEEK_END)
825 else:
828 else:
826 f.seek(self._docket.index_end, os.SEEK_SET)
829 f.seek(self._docket.index_end, os.SEEK_SET)
827 return f
830 return f
828 except FileNotFoundError:
831 except FileNotFoundError:
829 return self.opener(
832 return self.opener(
830 self._indexfile, mode=b"w+", checkambig=self._checkambig
833 self._indexfile, mode=b"w+", checkambig=self._checkambig
831 )
834 )
832
835
833 def __index_new_fp(self):
836 def __index_new_fp(self):
834 # You should not use this unless you are upgrading from inline revlog
837 # You should not use this unless you are upgrading from inline revlog
835 return self.opener(
838 return self.opener(
836 self._indexfile,
839 self._indexfile,
837 mode=b"w",
840 mode=b"w",
838 checkambig=self._checkambig,
841 checkambig=self._checkambig,
839 atomictemp=True,
842 atomictemp=True,
840 )
843 )
841
844
842 def _datafp(self, mode=b'r'):
845 def _datafp(self, mode=b'r'):
843 """file object for the revlog's data file"""
846 """file object for the revlog's data file"""
844 return self.opener(self._datafile, mode=mode)
847 return self.opener(self._datafile, mode=mode)
845
848
846 @contextlib.contextmanager
849 @contextlib.contextmanager
847 def _sidedatareadfp(self):
850 def _sidedatareadfp(self):
848 """file object suitable to read sidedata"""
851 """file object suitable to read sidedata"""
849 if self._writinghandles:
852 if self._writinghandles:
850 yield self._writinghandles[2]
853 yield self._writinghandles[2]
851 else:
854 else:
852 with self.opener(self._sidedatafile) as fp:
855 with self.opener(self._sidedatafile) as fp:
853 yield fp
856 yield fp
854
857
855 def tiprev(self):
858 def tiprev(self):
856 return len(self.index) - 1
859 return len(self.index) - 1
857
860
858 def tip(self):
861 def tip(self):
859 return self.node(self.tiprev())
862 return self.node(self.tiprev())
860
863
861 def __contains__(self, rev):
864 def __contains__(self, rev):
862 return 0 <= rev < len(self)
865 return 0 <= rev < len(self)
863
866
864 def __len__(self):
867 def __len__(self):
865 return len(self.index)
868 return len(self.index)
866
869
867 def __iter__(self):
870 def __iter__(self):
868 return iter(range(len(self)))
871 return iter(range(len(self)))
869
872
870 def revs(self, start=0, stop=None):
873 def revs(self, start=0, stop=None):
871 """iterate over all rev in this revlog (from start to stop)"""
874 """iterate over all rev in this revlog (from start to stop)"""
872 return storageutil.iterrevs(len(self), start=start, stop=stop)
875 return storageutil.iterrevs(len(self), start=start, stop=stop)
873
876
874 def hasnode(self, node):
877 def hasnode(self, node):
875 try:
878 try:
876 self.rev(node)
879 self.rev(node)
877 return True
880 return True
878 except KeyError:
881 except KeyError:
879 return False
882 return False
880
883
881 def candelta(self, baserev, rev):
884 def candelta(self, baserev, rev):
882 """whether two revisions (baserev, rev) can be delta-ed or not"""
885 """whether two revisions (baserev, rev) can be delta-ed or not"""
883 # Disable delta if either rev requires a content-changing flag
886 # Disable delta if either rev requires a content-changing flag
884 # processor (ex. LFS). This is because such flag processor can alter
887 # processor (ex. LFS). This is because such flag processor can alter
885 # the rawtext content that the delta will be based on, and two clients
888 # the rawtext content that the delta will be based on, and two clients
886 # could have a same revlog node with different flags (i.e. different
889 # could have a same revlog node with different flags (i.e. different
887 # rawtext contents) and the delta could be incompatible.
890 # rawtext contents) and the delta could be incompatible.
888 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
891 if (self.flags(baserev) & REVIDX_RAWTEXT_CHANGING_FLAGS) or (
889 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
892 self.flags(rev) & REVIDX_RAWTEXT_CHANGING_FLAGS
890 ):
893 ):
891 return False
894 return False
892 return True
895 return True

    def update_caches(self, transaction):
        if self._nodemap_file is not None:
            if transaction is None:
                nodemaputil.update_persistent_nodemap(self)
            else:
                nodemaputil.setup_persistent_nodemap(transaction, self)

    def clearcaches(self):
        self._revisioncache = None
        self._chainbasecache.clear()
        self._segmentfile.clear_cache()
        self._segmentfile_sidedata.clear_cache()
        self._pcache = {}
        self._nodemap_docket = None
        self.index.clearcaches()
        # The Python code is responsible for validating the docket, so we
        # end up having to refresh it here.
        use_nodemap = (
            not self._inline
            and self._nodemap_file is not None
            and util.safehasattr(self.index, 'update_nodemap_data')
        )
        if use_nodemap:
            nodemap_data = nodemaputil.persisted_data(self)
            if nodemap_data is not None:
                self._nodemap_docket = nodemap_data[0]
                self.index.update_nodemap_data(*nodemap_data)

    def rev(self, node):
        try:
            return self.index.rev(node)
        except TypeError:
            raise
        except error.RevlogError:
            # parsers.c radix tree lookup failed
            if (
                node == self.nodeconstants.wdirid
                or node in self.nodeconstants.wdirfilenodeids
            ):
                raise error.WdirUnsupported
            raise error.LookupError(node, self.display_id, _(b'no node'))

    # Accessors for index entries.

    # First tuple entry is 8 bytes. First 6 bytes are offset. Last 2 bytes
    # are flags.
    def start(self, rev):
        return int(self.index[rev][0] >> 16)

    def sidedata_cut_off(self, rev):
        sd_cut_off = self.index[rev][8]
        if sd_cut_off != 0:
            return sd_cut_off
        # This is some annoying dance, because entries without sidedata
        # currently use 0 as their offset (instead of previous-offset +
        # previous-size).
        #
        # We should reconsider this sidedata → 0 sidedata_offset policy.
        # In the meantime, we need this.
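        # Scan backward for the most recent revision that actually stored
        # sidedata; the cut-off is right past the end of its sidedata blob.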
        while 0 <= rev:
            e = self.index[rev]
            if e[9] != 0:
                return e[8] + e[9]
            rev -= 1
        return 0

    def flags(self, rev):
        return self.index[rev][0] & 0xFFFF

    def length(self, rev):
        return self.index[rev][1]

    def sidedata_length(self, rev):
        if not self.hassidedata:
            return 0
        return self.index[rev][9]

    def rawsize(self, rev):
        """return the length of the uncompressed text for a given revision"""
        l = self.index[rev][2]
        if l >= 0:
            return l

        t = self.rawdata(rev)
        return len(t)

    def size(self, rev):
        """length of non-raw text (processed by a "read" flag processor)"""
        # fast path: if no "read" flag processor could change the content,
        # size is rawsize. note: ELLIPSIS is known to not change the content.
        flags = self.flags(rev)
        if flags & (flagutil.REVIDX_KNOWN_FLAGS ^ REVIDX_ELLIPSIS) == 0:
            return self.rawsize(rev)

        return len(self.revision(rev))

    def fast_rank(self, rev):
        """Return the rank of a revision if already known, or None otherwise.

        The rank of a revision is the size of the sub-graph it defines as a
        head. Equivalently, the rank of a revision `r` is the size of the set
        `ancestors(r)`, `r` included.

        This method returns the rank retrieved from the revlog in constant
        time. It makes no attempt at computing unknown values for versions of
        the revlog which do not persist the rank.
        """
        rank = self.index[rev][ENTRY_RANK]
        if self._format_version != CHANGELOGV2 or rank == RANK_UNKNOWN:
            return None
        if rev == nullrev:
            return 0  # convention
        return rank

    def chainbase(self, rev):
        base = self._chainbasecache.get(rev)
        if base is not None:
            return base

        index = self.index
        iterrev = rev
        base = index[iterrev][3]
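        # Follow the base pointers until we reach a revision that is its own
        # delta base, i.e. the full snapshot the chain is anchored on.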
        while base != iterrev:
            iterrev = base
            base = index[iterrev][3]

        self._chainbasecache[rev] = base
        return base

    def linkrev(self, rev):
        return self.index[rev][4]

    def parentrevs(self, rev):
        try:
            entry = self.index[rev]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

        if self.canonical_parent_order and entry[5] == nullrev:
            return entry[6], entry[5]
        else:
            return entry[5], entry[6]

    # fast parentrevs(rev) where rev isn't filtered
    _uncheckedparentrevs = parentrevs

    def node(self, rev):
        try:
            return self.index[rev][7]
        except IndexError:
            if rev == wdirrev:
                raise error.WdirUnsupported
            raise

    # Derived from index values.

    def end(self, rev):
        return self.start(rev) + self.length(rev)

    def parents(self, node):
        i = self.index
        d = i[self.rev(node)]
        # inline node() to avoid function call overhead
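        # d[5] and d[6] are the parent revisions; indexing the revlog index
        # with nullrev (-1) yields the null entry, whose item 7 is nullid.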
        if self.canonical_parent_order and d[5] == self.nullid:
            return i[d[6]][7], i[d[5]][7]
        else:
            return i[d[5]][7], i[d[6]][7]

    def chainlen(self, rev):
        return self._chaininfo(rev)[0]

    def _chaininfo(self, rev):
        chaininfocache = self._chaininfocache
        if rev in chaininfocache:
            return chaininfocache[rev]
        index = self.index
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        clen = 0
        compresseddeltalen = 0
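        # Walk the delta chain, accumulating chain length and compressed
        # size; stop early on a cached entry (break) or fall through to the
        # else clause once the chain base is reached.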
        while iterrev != e[3]:
            clen += 1
            compresseddeltalen += e[1]
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            if iterrev in chaininfocache:
                t = chaininfocache[iterrev]
                clen += t[0]
                compresseddeltalen += t[1]
                break
            e = index[iterrev]
        else:
            # Add text length of base since decompressing that also takes
            # work. For cache hits the length is already included.
            compresseddeltalen += e[1]
        r = (clen, compresseddeltalen)
        chaininfocache[rev] = r
        return r

    def _deltachain(self, rev, stoprev=None):
        """Obtain the delta chain for a revision.

        ``stoprev`` specifies a revision to stop at. If not specified, we
        stop at the base of the chain.

        Returns a 2-tuple of (chain, stopped) where ``chain`` is a list of
        revs in ascending order and ``stopped`` is a bool indicating whether
        ``stoprev`` was hit.
        """
        # Try C implementation.
        try:
            return self.index.deltachain(rev, stoprev, self._generaldelta)
        except AttributeError:
            pass

        chain = []

        # Alias to prevent attribute lookup in tight loop.
        index = self.index
        generaldelta = self._generaldelta

        iterrev = rev
        e = index[iterrev]
        while iterrev != e[3] and iterrev != stoprev:
            chain.append(iterrev)
            if generaldelta:
                iterrev = e[3]
            else:
                iterrev -= 1
            e = index[iterrev]

        if iterrev == stoprev:
            stopped = True
        else:
            chain.append(iterrev)
            stopped = False

        chain.reverse()
        return chain, stopped

    def ancestors(self, revs, stoprev=0, inclusive=False):
        """Generate the ancestors of 'revs' in reverse revision order.
        Does not generate revs lower than stoprev.

        See the documentation for ancestor.lazyancestors for more details."""

        # first, make sure start revisions aren't filtered
        revs = list(revs)
        checkrev = self.node
        for r in revs:
            checkrev(r)
        # and we're sure ancestors aren't filtered as well

        if rustancestor is not None and self.index.rust_ext_compat:
            lazyancestors = rustancestor.LazyAncestors
            arg = self.index
        else:
            lazyancestors = ancestor.lazyancestors
            arg = self._uncheckedparentrevs
        return lazyancestors(arg, revs, stoprev=stoprev, inclusive=inclusive)

    def descendants(self, revs):
        return dagop.descendantrevs(revs, self.revs, self.parentrevs)

    def findcommonmissing(self, common=None, heads=None):
        """Return a tuple of the ancestors of common and the ancestors of heads
        that are not ancestors of common. In revset terminology, we return the
        tuple:

          ::common, (::heads) - (::common)

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [self.nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        # we want the ancestors, but inclusive
        class lazyset:
            def __init__(self, lazyvalues):
                self.addedvalues = set()
                self.lazyvalues = lazyvalues

            def __contains__(self, value):
                return value in self.addedvalues or value in self.lazyvalues

            def __iter__(self):
                added = self.addedvalues
                for r in added:
                    yield r
                for r in self.lazyvalues:
                    if r not in added:
                        yield r

            def add(self, value):
                self.addedvalues.add(value)

            def update(self, values):
                self.addedvalues.update(values)

        has = lazyset(self.ancestors(common))
        has.add(nullrev)
        has.update(common)

        # take all ancestors from heads that aren't in has
        missing = set()
        visit = collections.deque(r for r in heads if r not in has)
        while visit:
            r = visit.popleft()
            if r in missing:
                continue
            else:
                missing.add(r)
                for p in self.parentrevs(r):
                    if p not in has:
                        visit.append(p)
        missing = list(missing)
        missing.sort()
        return has, [self.node(miss) for miss in missing]

    def incrementalmissingrevs(self, common=None):
        """Return an object that can be used to incrementally compute the
        revision numbers of the ancestors of arbitrary sets that are not
        ancestors of common. This is an ancestor.incrementalmissingancestors
        object.

        'common' is a list of revision numbers. If common is not supplied, uses
        nullrev.
        """
        if common is None:
            common = [nullrev]

        if rustancestor is not None and self.index.rust_ext_compat:
            return rustancestor.MissingAncestors(self.index, common)
        return ancestor.incrementalmissingancestors(self.parentrevs, common)

    def findmissingrevs(self, common=None, heads=None):
        """Return the revision numbers of the ancestors of heads that
        are not ancestors of common.

        More specifically, return a list of revision numbers corresponding to
        nodes N such that every N satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of revision numbers. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullrev."""
        if common is None:
            common = [nullrev]
        if heads is None:
            heads = self.headrevs()

        inc = self.incrementalmissingrevs(common=common)
        return inc.missingancestors(heads)

    def findmissing(self, common=None, heads=None):
        """Return the ancestors of heads that are not ancestors of common.

        More specifically, return a list of nodes N such that every N
        satisfies the following constraints:

          1. N is an ancestor of some node in 'heads'
          2. N is not an ancestor of any node in 'common'

        The list is sorted by revision number, meaning it is
        topologically sorted.

        'heads' and 'common' are both lists of node IDs. If heads is
        not supplied, uses all of the revlog's heads. If common is not
        supplied, uses nullid."""
        if common is None:
            common = [self.nullid]
        if heads is None:
            heads = self.heads()

        common = [self.rev(n) for n in common]
        heads = [self.rev(n) for n in heads]

        inc = self.incrementalmissingrevs(common=common)
        return [self.node(r) for r in inc.missingancestors(heads)]

    def nodesbetween(self, roots=None, heads=None):
        """Return a topological path from 'roots' to 'heads'.

        Return a tuple (nodes, outroots, outheads) where 'nodes' is a
        topologically sorted list of all nodes N that satisfy both of
        these constraints:

          1. N is a descendant of some node in 'roots'
          2. N is an ancestor of some node in 'heads'

        Every node is considered to be both a descendant and an ancestor
        of itself, so every reachable node in 'roots' and 'heads' will be
        included in 'nodes'.

        'outroots' is the list of reachable nodes in 'roots', i.e., the
        subset of 'roots' that is returned in 'nodes'. Likewise,
        'outheads' is the subset of 'heads' that is also in 'nodes'.

        'roots' and 'heads' are both lists of node IDs. If 'roots' is
        unspecified, uses nullid as the only root. If 'heads' is
        unspecified, uses list of all of the revlog's heads."""
        nonodes = ([], [], [])
        if roots is not None:
            roots = list(roots)
            if not roots:
                return nonodes
            lowestrev = min([self.rev(n) for n in roots])
        else:
            roots = [self.nullid]  # Everybody's a descendant of nullid
            lowestrev = nullrev
        if (lowestrev == nullrev) and (heads is None):
            # We want _all_ the nodes!
            return (
                [self.node(r) for r in self],
                [self.nullid],
                list(self.heads()),
            )
        if heads is None:
            # All nodes are ancestors, so the latest ancestor is the last
            # node.
            highestrev = len(self) - 1
            # Set ancestors to None to signal that every node is an ancestor.
            ancestors = None
            # Set heads to an empty dictionary for later discovery of heads
            heads = {}
        else:
            heads = list(heads)
            if not heads:
                return nonodes
            ancestors = set()
            # Turn heads into a dictionary so we can remove 'fake' heads.
            # Also, later we will be using it to filter out the heads we can't
            # find from roots.
            heads = dict.fromkeys(heads, False)
            # Start at the top and keep marking parents until we're done.
            nodestotag = set(heads)
            # Remember where the top was so we can use it as a limit later.
            highestrev = max([self.rev(n) for n in nodestotag])
            while nodestotag:
                # grab a node to tag
                n = nodestotag.pop()
                # Never tag nullid
                if n == self.nullid:
                    continue
                # A node's revision number represents its place in a
                # topologically sorted list of nodes.
                r = self.rev(n)
                if r >= lowestrev:
                    if n not in ancestors:
                        # If we are possibly a descendant of one of the roots
                        # and we haven't already been marked as an ancestor
                        ancestors.add(n)  # Mark as ancestor
                        # Add non-nullid parents to list of nodes to tag.
                        nodestotag.update(
                            [p for p in self.parents(n) if p != self.nullid]
                        )
                    elif n in heads:  # We've seen it before, is it a fake head?
                        # So it is, real heads should not be the ancestors of
                        # any other heads.
                        heads.pop(n)
            if not ancestors:
                return nonodes
            # Now that we have our set of ancestors, we want to remove any
            # roots that are not ancestors.

            # If one of the roots was nullid, everything is included anyway.
            if lowestrev > nullrev:
                # But, since we weren't, let's recompute the lowest rev to not
                # include roots that aren't ancestors.

                # Filter out roots that aren't ancestors of heads
                roots = [root for root in roots if root in ancestors]
                # Recompute the lowest revision
                if roots:
                    lowestrev = min([self.rev(root) for root in roots])
                else:
                    # No more roots? Return empty list
                    return nonodes
            else:
                # We are descending from nullid, and don't need to care about
                # any other roots.
                lowestrev = nullrev
                roots = [self.nullid]
        # Transform our roots list into a set.
        descendants = set(roots)
        # Also, keep the original roots so we can filter out roots that aren't
        # 'real' roots (i.e. are descended from other roots).
        roots = descendants.copy()
        # Our topologically sorted list of output nodes.
        orderedout = []
        # Don't start at nullid since we don't want nullid in our output list,
        # and if nullid shows up in descendants, empty parents will look like
        # they're descendants.
        for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
            n = self.node(r)
            isdescendant = False
            if lowestrev == nullrev:  # Everybody is a descendant of nullid
                isdescendant = True
            elif n in descendants:
                # n is already a descendant
                isdescendant = True
                # This check only needs to be done here because all the roots
                # will start being marked as descendants before the loop.
                if n in roots:
                    # If n was a root, check if it's a 'real' root.
                    p = tuple(self.parents(n))
                    # If any of its parents are descendants, it's not a root.
                    if (p[0] in descendants) or (p[1] in descendants):
                        roots.remove(n)
            else:
                p = tuple(self.parents(n))
                # A node is a descendant if either of its parents are
                # descendants. (We seeded the descendants set with the roots
                # up there, remember?)
                if (p[0] in descendants) or (p[1] in descendants):
                    descendants.add(n)
                    isdescendant = True
            if isdescendant and ((ancestors is None) or (n in ancestors)):
                # Only include nodes that are both descendants and ancestors.
                orderedout.append(n)
                if (ancestors is not None) and (n in heads):
                    # We're trying to figure out which heads are reachable
                    # from roots.
                    # Mark this head as having been reached
                    heads[n] = True
                elif ancestors is None:
                    # Otherwise, we're trying to discover the heads.
                    # Assume this is a head because if it isn't, the next step
                    # will eventually remove it.
                    heads[n] = True
                    # But, obviously its parents aren't.
                    for p in self.parents(n):
                        heads.pop(p, None)
        heads = [head for head, flag in heads.items() if flag]
        roots = list(roots)
        assert orderedout
        assert roots
        assert heads
        return (orderedout, roots, heads)

    def headrevs(self, revs=None):
        if revs is None:
            try:
                return self.index.headrevs()
            except AttributeError:
                return self._headrevs()
        if rustdagop is not None and self.index.rust_ext_compat:
            return rustdagop.headrevs(self.index, revs)
        return dagop.headrevs(revs, self._uncheckedparentrevs)

    def computephases(self, roots):
        return self.index.computephasesmapsets(roots)

    def _headrevs(self):
        count = len(self)
        if not count:
            return [nullrev]
        # we won't iterate over filtered revs, so nobody is a head at start
        ishead = [0] * (count + 1)
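        # The list has count + 1 slots: the parent writes below may target
        # slot nullrev (-1), which Python maps onto this extra scratch slot
        # instead of clobbering the entry for the last real revision.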
        index = self.index
        for r in self:
            ishead[r] = 1  # I may be a head
            e = index[r]
            ishead[e[5]] = ishead[e[6]] = 0  # my parents are not
        return [r for r, val in enumerate(ishead) if val]

    def heads(self, start=None, stop=None):
        """return the list of all nodes that have no children

        if start is specified, only heads that are descendants of
        start will be returned
        if stop is specified, the revs in stop are treated as if
        they had no children
        """
        if start is None and stop is None:
            if not len(self):
                return [self.nullid]
            return [self.node(r) for r in self.headrevs()]

        if start is None:
            start = nullrev
        else:
            start = self.rev(start)

        stoprevs = {self.rev(n) for n in stop or []}

        revs = dagop.headrevssubset(
            self.revs, self.parentrevs, startrev=start, stoprevs=stoprevs
        )

        return [self.node(rev) for rev in revs]

    def children(self, node):
        """find the children of a given node"""
        c = []
        p = self.rev(node)
        for r in self.revs(start=p + 1):
            prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
            if prevs:
                for pr in prevs:
                    if pr == p:
                        c.append(self.node(r))
            elif p == nullrev:
                c.append(self.node(r))
        return c

    def commonancestorsheads(self, a, b):
        """calculate all the heads of the common ancestors of nodes a and b"""
        a, b = self.rev(a), self.rev(b)
        ancs = self._commonancestorsheads(a, b)
        return pycompat.maplist(self.node, ancs)

    def _commonancestorsheads(self, *revs):
        """calculate all the heads of the common ancestors of revs"""
        try:
            ancs = self.index.commonancestorsheads(*revs)
        except (AttributeError, OverflowError):  # C implementation failed
            ancs = ancestor.commonancestorsheads(self.parentrevs, *revs)
        return ancs

    def isancestor(self, a, b):
        """return True if node a is an ancestor of node b

        A revision is considered an ancestor of itself."""
        a, b = self.rev(a), self.rev(b)
        return self.isancestorrev(a, b)

    def isancestorrev(self, a, b):
        """return True if revision a is an ancestor of revision b

        A revision is considered an ancestor of itself.

        The implementation of this is trivial but the use of
        reachableroots is not."""
        if a == nullrev:
            return True
        elif a == b:
            return True
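        # Revision numbers are topologically ordered, so an ancestor always
        # has a smaller revision number than its descendants.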
        elif a > b:
            return False
        return bool(self.reachableroots(a, [b], [a], includepath=False))

    def reachableroots(self, minroot, heads, roots, includepath=False):
        """return (heads(::(<roots> and <roots>::<heads>)))

        If includepath is True, return (<roots>::<heads>)."""
        try:
            return self.index.reachableroots2(
                minroot, heads, roots, includepath
            )
        except AttributeError:
            return dagop._reachablerootspure(
                self.parentrevs, minroot, roots, heads, includepath
            )

    def ancestor(self, a, b):
        """calculate the "best" common ancestor of nodes a and b"""

        a, b = self.rev(a), self.rev(b)
        try:
            ancs = self.index.ancestors(a, b)
        except (AttributeError, OverflowError):
            ancs = ancestor.ancestors(self.parentrevs, a, b)
        if ancs:
            # choose a consistent winner when there's a tie
            return min(map(self.node, ancs))
        return self.nullid

    def _match(self, id):
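        # Try, in order: an integer revision, a binary nodeid, a stringified
        # revision number, and finally a full hex nodeid.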
        if isinstance(id, int):
            # rev
            return self.node(id)
        if len(id) == self.nodeconstants.nodelen:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node)  # quick search the index
                return node
            except error.LookupError:
                pass  # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if b"%d" % rev != id:
                raise ValueError
            if rev < 0:
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 2 * self.nodeconstants.nodelen:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (binascii.Error, error.LookupError):
                pass

    def _partialmatch(self, id):
        # we don't care about wdirfilenodeids as they should always be full
        # hashes
        maybewdir = self.nodeconstants.wdirhex.startswith(id)
        ambiguous = False
        try:
            partial = self.index.partialmatch(id)
            if partial and self.hasnode(partial):
                if maybewdir:
                    # single 'ff...' match in radix tree, ambiguous with wdir
                    ambiguous = True
                else:
                    return partial
            elif maybewdir:
                # no 'ff...' match in radix tree, wdir identified
                raise error.WdirUnsupported
            else:
                return None
        except error.RevlogError:
            # parsers.c radix tree lookup gave multiple matches
            # fast path: for unfiltered changelog, radix tree is accurate
            if not getattr(self, 'filteredrevs', None):
                ambiguous = True
            # fall through to slow path that filters hidden revisions
        except (AttributeError, ValueError):
            # we are pure python, or key is not hex
            pass
        if ambiguous:
            raise error.AmbiguousPrefixLookupError(
                id, self.display_id, _(b'ambiguous identifier')
            )

        if id in self._pcache:
            return self._pcache[id]

        if len(id) <= 40:
            # hex(node)[:...]
            l = len(id) // 2 * 2  # grab an even number of digits
            try:
                # we're dropping the last digit, so let's check that it's hex,
                # to avoid the expensive computation below if it's not
                if len(id) % 2 > 0:
                    if id[-1] not in hexdigits:
                        return None
                prefix = bin(id[:l])
            except binascii.Error:
                pass
            else:
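                # slow path: scan the entire index for nodes whose binary
                # form starts with the decoded prefix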
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                nl = [
                    n for n in nl if hex(n).startswith(id) and self.hasnode(n)
                ]
                if self.nodeconstants.nullhex.startswith(id):
                    nl.append(self.nullid)
                if len(nl) > 0:
                    if len(nl) == 1 and not maybewdir:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise error.AmbiguousPrefixLookupError(
                        id, self.display_id, _(b'ambiguous identifier')
                    )
                if maybewdir:
                    raise error.WdirUnsupported
                return None

    def lookup(self, id):
        """locate a node based on:
        - revision number or str(revision number)
        - nodeid or subset of hex nodeid
        """
        n = self._match(id)
        if n is not None:
            return n
        n = self._partialmatch(id)
        if n:
            return n

        raise error.LookupError(id, self.display_id, _(b'no match found'))

    def shortest(self, node, minlength=1):
        """Find the shortest unambiguous prefix that matches node."""

        def isvalid(prefix):
            try:
                matchednode = self._partialmatch(prefix)
            except error.AmbiguousPrefixLookupError:
                return False
            except error.WdirUnsupported:
                # single 'ff...' match
                return True
            if matchednode is None:
                raise error.LookupError(node, self.display_id, _(b'no node'))
            return True

        def maybewdir(prefix):
            return all(c == b'f' for c in pycompat.iterbytestr(prefix))

        hexnode = hex(node)

        def disambiguate(hexnode, minlength):
            """Disambiguate against wdirid."""
            for length in range(minlength, len(hexnode) + 1):
                prefix = hexnode[:length]
                if not maybewdir(prefix):
                    return prefix

        if not getattr(self, 'filteredrevs', None):
            try:
                length = max(self.index.shortest(node), minlength)
                return disambiguate(hexnode, length)
            except error.RevlogError:
                if node != self.nodeconstants.wdirid:
                    raise error.LookupError(
                        node, self.display_id, _(b'no node')
                    )
            except AttributeError:
                # Fall through to pure code
                pass

        if node == self.nodeconstants.wdirid:
            for length in range(minlength, len(hexnode) + 1):
                prefix = hexnode[:length]
                if isvalid(prefix):
                    return prefix

        for length in range(minlength, len(hexnode) + 1):
            prefix = hexnode[:length]
            if isvalid(prefix):
                return disambiguate(hexnode, length)

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different from what is stored.
        """
        p1, p2 = self.parents(node)
        return storageutil.hashrevisionsha1(text, p1, p2) != node

    def _getsegmentforrevs(self, startrev, endrev, df=None):
        """Obtain a segment of raw data corresponding to a range of revisions.

        Accepts the start and end revisions and an optional already-open
        file handle to be used for reading. If the file handle is read, its
        seek position will not be preserved.

        Requests for data may be satisfied by a cache.

        Returns a 2-tuple of (offset, data) for the requested range of
        revisions. Offset is the integer offset from the beginning of the
        revlog and data is a str or buffer of the raw byte data.

        Callers will need to call ``self.start(rev)`` and ``self.length(rev)``
        to determine where each revision's data begins and ends.
        """
        # Inlined self.start(startrev) & self.end(endrev) for perf reasons
        # (functions are expensive).
        index = self.index
        istart = index[startrev]
        start = int(istart[0] >> 16)
        if startrev == endrev:
            end = start + istart[1]
        else:
            iend = index[endrev]
            end = int(iend[0] >> 16) + iend[1]

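        # In an inline revlog, index entries and data chunks are interleaved
        # in the same file, so skip past the index entries stored before the
        # requested data.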
        if self._inline:
            start += (startrev + 1) * self.index.entry_size
            end += (endrev + 1) * self.index.entry_size
        length = end - start

        return start, self._segmentfile.read_chunk(start, length, df)

    def _chunk(self, rev, df=None):
        """Obtain a single decompressed chunk for a revision.

        Accepts an integer revision and an optional already-open file handle
        to be used for reading. If used, the seek position of the file will not
        be preserved.

        Returns a str holding uncompressed data for the requested revision.
        """
        compression_mode = self.index[rev][10]
        data = self._getsegmentforrevs(rev, rev, df=df)[1]
        if compression_mode == COMP_MODE_PLAIN:
            return data
        elif compression_mode == COMP_MODE_DEFAULT:
            return self._decompressor(data)
        elif compression_mode == COMP_MODE_INLINE:
            return self.decompress(data)
        else:
            msg = b'unknown compression mode %d'
            msg %= compression_mode
            raise error.RevlogError(msg)

    def _chunks(self, revs, df=None, targetsize=None):
        """Obtain decompressed chunks for the specified revisions.

        Accepts an iterable of numeric revisions that are assumed to be in
        ascending order. Also accepts an optional already-open file handle
        to be used for reading. If used, the seek position of the file will
        not be preserved.

        This function is similar to calling ``self._chunk()`` multiple times,
        but is faster.

        Returns a list with decompressed data for each requested revision.
        """
        if not revs:
            return []
        start = self.start
        length = self.length
        inline = self._inline
        iosize = self.index.entry_size
        buffer = util.buffer

        l = []
        ladd = l.append

        if not self._withsparseread:
            slicedchunks = (revs,)
        else:
            slicedchunks = deltautil.slicechunk(
                self, revs, targetsize=targetsize
            )

        for revschunk in slicedchunks:
            firstrev = revschunk[0]
            # Skip trailing revisions with empty diff
            for lastrev in revschunk[::-1]:
                if length(lastrev) != 0:
                    break

            try:
                offset, data = self._getsegmentforrevs(firstrev, lastrev, df=df)
            except OverflowError:
                # issue4215 - we can't cache a run of chunks greater than
                # 2G on Windows
                return [self._chunk(rev, df=df) for rev in revschunk]

            decomp = self.decompress
            # self._decompressor might be None, but will not be used in that case
            def_decomp = self._decompressor
            for rev in revschunk:
                chunkstart = start(rev)
                if inline:
                    chunkstart += (rev + 1) * iosize
                chunklength = length(rev)
                comp_mode = self.index[rev][10]
                c = buffer(data, chunkstart - offset, chunklength)
                if comp_mode == COMP_MODE_PLAIN:
                    ladd(c)
                elif comp_mode == COMP_MODE_INLINE:
                    ladd(decomp(c))
                elif comp_mode == COMP_MODE_DEFAULT:
                    ladd(def_decomp(c))
                else:
                    msg = b'unknown compression mode %d'
                    msg %= comp_mode
                    raise error.RevlogError(msg)

        return l
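
    # Minimal usage sketch (``rl`` and ``some_rev`` are hypothetical;
    # revisions must be in ascending order, as the docstring above requires):
    #
    #   chain, stopped = rl._deltachain(some_rev)
    #   bins = rl._chunks(chain, targetsize=4 * rl.rawsize(some_rev))
    #
    # when the chain was not truncated by a cached revision, ``bins[0]`` is
    # the full-text base and the remaining items are deltas applied on top.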

    def deltaparent(self, rev):
        """return deltaparent of the given revision"""
        base = self.index[rev][3]
        if base == rev:
            return nullrev
        elif self._generaldelta:
            return base
        else:
            return rev - 1
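
    # Example of the two layouts handled above (hypothetical values): with
    # general delta, index entry slot 3 names an arbitrary base revision, so
    # deltaparent(5) may return 2; without it, deltas always chain against
    # the previous revision, so deltaparent(5) returns 4, or nullrev when
    # rev 5 stores a full snapshot (base == rev).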

    def issnapshot(self, rev):
        """tells whether rev is a snapshot"""
        if not self._sparserevlog:
            return self.deltaparent(rev) == nullrev
        elif util.safehasattr(self.index, 'issnapshot'):
            # directly assign the method to cache the testing and access
            self.issnapshot = self.index.issnapshot
            return self.issnapshot(rev)
        if rev == nullrev:
            return True
        entry = self.index[rev]
        base = entry[3]
        if base == rev:
            return True
        if base == nullrev:
            return True
        p1 = entry[5]
        while self.length(p1) == 0:
            b = self.deltaparent(p1)
            if b == p1:
                break
            p1 = b
        p2 = entry[6]
        while self.length(p2) == 0:
            b = self.deltaparent(p2)
            if b == p2:
                break
            p2 = b
        if base == p1 or base == p2:
            return False
        return self.issnapshot(base)
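
    # Summary of the sparse-revlog rule implemented above: a revision is a
    # snapshot when it stores a full text (base == rev or base == nullrev),
    # or when its delta base is itself a snapshot that is not one of the
    # revision's parents (empty-diff parents are skipped through first).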

    def snapshotdepth(self, rev):
        """number of snapshots in the chain before this one"""
        if not self.issnapshot(rev):
            raise error.ProgrammingError(b'revision %d not a snapshot' % rev)
        return len(self._deltachain(rev)[0]) - 1

    def revdiff(self, rev1, rev2):
        """return or calculate a delta between two revisions

        The delta calculated is in binary form and is intended to be written to
        revlog data directly. So this function needs raw revision data.
        """
        if rev1 != nullrev and self.deltaparent(rev2) == rev1:
            return bytes(self._chunk(rev2))

        return mdiff.textdiff(self.rawdata(rev1), self.rawdata(rev2))
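
    # Usage sketch (``rl``, ``r1`` and ``r2`` are hypothetical):
    #
    #   delta = rl.revdiff(r1, r2)
    #   assert mdiff.patches(rl.rawdata(r1), [delta]) == rl.rawdata(r2)
    #
    # the fast path above returns the stored chunk untouched when r2's delta
    # is already expressed against r1.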

    def revision(self, nodeorrev, _df=None):
        """return an uncompressed revision of a given node or revision
        number.

        _df - an existing file handle to read from. (internal-only)
        """
        return self._revisiondata(nodeorrev, _df)

    def sidedata(self, nodeorrev, _df=None):
        """a map of extra data related to the changeset but not part of the hash

        This function currently returns a dictionary. However, a more advanced
        mapping object will likely be used in the future for more
        efficient/lazy code.
        """
        # deal with <nodeorrev> argument type
        if isinstance(nodeorrev, int):
            rev = nodeorrev
        else:
            rev = self.rev(nodeorrev)
        return self._sidedata(rev)

    def _revisiondata(self, nodeorrev, _df=None, raw=False):
        # deal with <nodeorrev> argument type
        if isinstance(nodeorrev, int):
            rev = nodeorrev
            node = self.node(rev)
        else:
            node = nodeorrev
            rev = None

        # fast path the special `nullid` rev
        if node == self.nullid:
            return b""

        # ``rawtext`` is the text as stored inside the revlog. Might be the
        # revision or might need to be processed to retrieve the revision.
        rev, rawtext, validated = self._rawtext(node, rev, _df=_df)

        if raw and validated:
            # if we don't want to process the raw text and the raw
            # text is cached, we can exit early.
            return rawtext
        if rev is None:
            rev = self.rev(node)
        # the revlog's flags for this revision
        # (usually altering its state or content)
        flags = self.flags(rev)

        if validated and flags == REVIDX_DEFAULT_FLAGS:
            # no extra flags set, no flag processor runs, text = rawtext
            return rawtext

        if raw:
            validatehash = flagutil.processflagsraw(self, rawtext, flags)
            text = rawtext
        else:
            r = flagutil.processflagsread(self, rawtext, flags)
            text, validatehash = r
        if validatehash:
            self.checkhash(text, node, rev=rev)
        if not validated:
            self._revisioncache = (node, rev, rawtext)

        return text

    def _rawtext(self, node, rev, _df=None):
        """return the possibly unvalidated rawtext for a revision

        returns (rev, rawtext, validated)
        """

        # revision in the cache (could be useful to apply delta)
        cachedrev = None
        # An intermediate text to apply deltas to
        basetext = None

        # Check if we have the entry in cache
        # The cache entry looks like (node, rev, rawtext)
        if self._revisioncache:
            if self._revisioncache[0] == node:
                return (rev, self._revisioncache[2], True)
            cachedrev = self._revisioncache[1]

        if rev is None:
            rev = self.rev(node)

        chain, stopped = self._deltachain(rev, stoprev=cachedrev)
        if stopped:
            basetext = self._revisioncache[2]

        # drop cache to save memory, the caller is expected to
        # update self._revisioncache after validating the text
        self._revisioncache = None

        targetsize = None
        rawsize = self.index[rev][2]
        if 0 <= rawsize:
            targetsize = 4 * rawsize

        bins = self._chunks(chain, df=_df, targetsize=targetsize)
        if basetext is None:
            basetext = bytes(bins[0])
            bins = bins[1:]

        rawtext = mdiff.patches(basetext, bins)
        del basetext  # let us have a chance to free memory early
        return (rev, rawtext, False)
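
    # Reconstruction sketch for the delta-chain walk above (hypothetical
    # chain): given chain == [base, d1, d2] for the requested revision,
    #
    #   bins = self._chunks([base, d1, d2], ...)
    #   rawtext = mdiff.patches(bytes(bins[0]), bins[1:])
    #
    # when the walk stopped at a cached revision instead, the cached rawtext
    # becomes the base and ``bins`` holds only the remaining deltas.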

    def _sidedata(self, rev):
        """Return the sidedata for a given revision number."""
        index_entry = self.index[rev]
        sidedata_offset = index_entry[8]
        sidedata_size = index_entry[9]

        if self._inline:
            sidedata_offset += self.index.entry_size * (1 + rev)
        if sidedata_size == 0:
            return {}

        if self._docket.sidedata_end < sidedata_offset + sidedata_size:
            filename = self._sidedatafile
            end = self._docket.sidedata_end
            offset = sidedata_offset
            length = sidedata_size
            m = FILE_TOO_SHORT_MSG % (filename, length, offset, end)
            raise error.RevlogError(m)

        comp_segment = self._segmentfile_sidedata.read_chunk(
            sidedata_offset, sidedata_size
        )

        comp = self.index[rev][11]
        if comp == COMP_MODE_PLAIN:
            segment = comp_segment
        elif comp == COMP_MODE_DEFAULT:
            segment = self._decompressor(comp_segment)
        elif comp == COMP_MODE_INLINE:
            segment = self.decompress(comp_segment)
        else:
            msg = b'unknown compression mode %d'
            msg %= comp
            raise error.RevlogError(msg)

        sidedata = sidedatautil.deserialize_sidedata(segment)
        return sidedata

    def rawdata(self, nodeorrev, _df=None):
        """return the uncompressed raw data of a given node or revision number.

        _df - an existing file handle to read from. (internal-only)
        """
        return self._revisiondata(nodeorrev, _df, raw=True)

    def hash(self, text, p1, p2):
        """Compute a node hash.

        Available as a function so that subclasses can replace the hash
        as needed.
        """
        return storageutil.hashrevisionsha1(text, p1, p2)
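
    # The default implementation hashes the sorted parent nodes followed by
    # the text, i.e. sha1(min(p1, p2) + max(p1, p2) + text), so parent order
    # does not affect the resulting node id.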

    def checkhash(self, text, node, p1=None, p2=None, rev=None):
        """Check node hash integrity.

        Available as a function so that subclasses can extend hash mismatch
        behaviors as needed.
        """
        try:
            if p1 is None and p2 is None:
                p1, p2 = self.parents(node)
            if node != self.hash(text, p1, p2):
                # Clear the revision cache on hash failure. The revision cache
                # only stores the raw revision and clearing the cache does have
                # the side-effect that we won't have a cache hit when the raw
                # revision data is accessed. But this case should be rare and
                # it is extra work to teach the cache about the hash
                # verification state.
                if self._revisioncache and self._revisioncache[0] == node:
                    self._revisioncache = None

                revornode = rev
                if revornode is None:
                    revornode = templatefilters.short(hex(node))
                raise error.RevlogError(
                    _(b"integrity check failed on %s:%s")
                    % (self.display_id, pycompat.bytestr(revornode))
                )
        except error.RevlogError:
            if self._censorable and storageutil.iscensoredtext(text):
                raise error.CensoredNodeError(self.display_id, node, text)
            raise

    @property
    def _split_index_file(self):
        """the path at which to expect the index of an ongoing split operation

        The file will only exist if a splitting operation is in progress, but
        it is always expected at the same location."""
        parts = self.radix.split(b'/')
        if len(parts) > 1:
            # adds a '-s' suffix to the ``data/`` or ``meta/`` base directory
            head = parts[0] + b'-s'
            mids = parts[1:-1]
            tail = parts[-1] + b'.i'
            pieces = [head] + mids + [tail]
            return b'/'.join(pieces)
        else:
            # the revlog is stored at the root of the store (changelog or
            # manifest), no risk of collision.
            return self.radix + b'.i.s'
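
    # Example of the naming scheme above (hypothetical tracked file): a
    # revlog with radix ``data/some/dir/file.txt`` yields
    # ``data-s/some/dir/file.txt.i`` while splitting, whereas the changelog
    # (radix ``00changelog``) yields ``00changelog.i.s``.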

    def _enforceinlinesize(self, tr, side_write=True):
        """Check if the revlog is too big for inline and convert if so.

        This should be called after revisions are added to the revlog. If the
        revlog has grown too large to be an inline revlog, it will convert it
        to use multiple index and data files.
        """
        tiprev = len(self) - 1
        total_size = self.start(tiprev) + self.length(tiprev)
        if not self._inline or total_size < _maxinline:
            return

        troffset = tr.findoffset(self._indexfile)
        if troffset is None:
            raise error.RevlogError(
                _(b"%s not found in the transaction") % self._indexfile
            )
        if troffset:
            tr.addbackup(self._indexfile, for_offset=True)
        tr.add(self._datafile, 0)

        existing_handles = False
        if self._writinghandles is not None:
            existing_handles = True
            fp = self._writinghandles[0]
            fp.flush()
            fp.close()
            # We can't use the cached file handle after close(). So prevent
            # its usage.
            self._writinghandles = None
            self._segmentfile.writing_handle = None
            # No need to deal with sidedata writing handle as it is only
            # relevant with revlog-v2 which is never inline, not reaching
            # this code
        if side_write:
            old_index_file_path = self._indexfile
            new_index_file_path = self._split_index_file
            opener = self.opener
            weak_self = weakref.ref(self)

            # the "split" index replaces the real index when the transaction
            # is finalized
            def finalize_callback(tr):
                opener.rename(
                    new_index_file_path,
                    old_index_file_path,
                    checkambig=True,
                )
                maybe_self = weak_self()
                if maybe_self is not None:
                    maybe_self._indexfile = old_index_file_path

            def abort_callback(tr):
                maybe_self = weak_self()
                if maybe_self is not None:
                    maybe_self._indexfile = old_index_file_path

            tr.registertmp(new_index_file_path)
            if self.target[1] is not None:
                callback_id = b'000-revlog-split-%d-%s' % self.target
            else:
                callback_id = b'000-revlog-split-%d' % self.target[0]
            tr.addfinalize(callback_id, finalize_callback)
            tr.addabort(callback_id, abort_callback)

        new_dfh = self._datafp(b'w+')
        new_dfh.truncate(0)  # drop any potentially existing data
        try:
            with self._indexfp() as read_ifh:
                for r in self:
                    new_dfh.write(self._getsegmentforrevs(r, r, df=read_ifh)[1])
                new_dfh.flush()

            if side_write:
                self._indexfile = new_index_file_path
                with self.__index_new_fp() as fp:
                    self._format_flags &= ~FLAG_INLINE_DATA
                    self._inline = False
                    for i in self:
                        e = self.index.entry_binary(i)
                        if i == 0 and self._docket is None:
                            header = self._format_flags | self._format_version
                            header = self.index.pack_header(header)
                            e = header + e
                        fp.write(e)
                    if self._docket is not None:
                        self._docket.index_end = fp.tell()

                # If we don't use side-write, the temp file replaces the real
                # index when we exit the context manager

            nodemaputil.setup_persistent_nodemap(tr, self)
            self._segmentfile = randomaccessfile.randomaccessfile(
                self.opener,
                self._datafile,
                self._chunkcachesize,
            )

            if existing_handles:
                # switched from inline to conventional; reopen the index
                ifh = self.__index_write_fp()
                self._writinghandles = (ifh, new_dfh, None)
                self._segmentfile.writing_handle = new_dfh
                new_dfh = None
                # No need to deal with sidedata writing handle as it is only
                # relevant with revlog-v2 which is never inline, not reaching
                # this code
        finally:
            if new_dfh is not None:
                new_dfh.close()

    def _nodeduplicatecallback(self, transaction, node):
        """called when trying to add a node already stored."""

    @contextlib.contextmanager
    def reading(self):
        """Context manager that keeps data and sidedata files open for reading"""
        with self._segmentfile.reading():
            with self._segmentfile_sidedata.reading():
                yield
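
    # Usage sketch (``rl`` and ``revs`` are hypothetical): batching reads
    # under one pair of open handles avoids reopening the files per call:
    #
    #   with rl.reading():
    #       texts = [rl.revision(r) for r in revs]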

    @contextlib.contextmanager
    def _writing(self, transaction):
        if self._trypending:
            msg = b'try to write in a `trypending` revlog: %s'
            msg %= self.display_id
            raise error.ProgrammingError(msg)
        if self._writinghandles is not None:
            yield
        else:
            ifh = dfh = sdfh = None
            try:
                r = len(self)
                # opening the data file.
                dsize = 0
                if r:
                    dsize = self.end(r - 1)
                dfh = None
                if not self._inline:
                    try:
                        dfh = self._datafp(b"r+")
                        if self._docket is None:
                            dfh.seek(0, os.SEEK_END)
                        else:
                            dfh.seek(self._docket.data_end, os.SEEK_SET)
                    except FileNotFoundError:
                        dfh = self._datafp(b"w+")
                    transaction.add(self._datafile, dsize)
                if self._sidedatafile is not None:
                    # revlog-v2 does not inline, help Pytype
                    assert dfh is not None
                    try:
                        sdfh = self.opener(self._sidedatafile, mode=b"r+")
                        sdfh.seek(self._docket.sidedata_end, os.SEEK_SET)
                    except FileNotFoundError:
                        sdfh = self.opener(self._sidedatafile, mode=b"w+")
                    transaction.add(
                        self._sidedatafile, self._docket.sidedata_end
                    )

                # opening the index file.
                isize = r * self.index.entry_size
                ifh = self.__index_write_fp()
                if self._inline:
                    transaction.add(self._indexfile, dsize + isize)
                else:
                    transaction.add(self._indexfile, isize)
                # exposing all file handles for writing.
                self._writinghandles = (ifh, dfh, sdfh)
                self._segmentfile.writing_handle = ifh if self._inline else dfh
                self._segmentfile_sidedata.writing_handle = sdfh
                yield
                if self._docket is not None:
                    self._write_docket(transaction)
            finally:
                self._writinghandles = None
                self._segmentfile.writing_handle = None
                self._segmentfile_sidedata.writing_handle = None
                if dfh is not None:
                    dfh.close()
                if sdfh is not None:
                    sdfh.close()
                # closing the index file last to avoid exposing references
                # to potentially unflushed data content.
                if ifh is not None:
                    ifh.close()
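
    # Handle lifecycle of the context manager above: on entry the index,
    # data and (for revlog-v2) sidedata files are opened and exposed through
    # self._writinghandles; nested _writing() calls reuse them via the early
    # ``yield``; on exit every handle is closed, the index last so readers
    # never observe it referencing unflushed data.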

    def _write_docket(self, transaction):
        """write the current docket on disk

        Exists as a method to help the changelog implement transaction logic.

        We could also imagine using the same transaction logic for all revlogs
        since dockets are cheap."""
        self._docket.write(transaction)

    def addrevision(
        self,
        text,
        transaction,
        link,
        p1,
        p2,
        cachedelta=None,
        node=None,
        flags=REVIDX_DEFAULT_FLAGS,
        deltacomputer=None,
        sidedata=None,
    ):
        """add a revision to the log

        text - the revision data to add
        transaction - the transaction object used for rollback
        link - the linkrev data to add
        p1, p2 - the parent nodeids of the revision
        cachedelta - an optional precomputed delta
        node - nodeid of revision; typically node is not specified, and it is
            computed by default as hash(text, p1, p2), however subclasses might
            use a different hashing method (and override checkhash() in such a
            case)
        flags - the known flags to set on the revision
        deltacomputer - an optional deltacomputer instance shared between
            multiple calls
        """
        if link == nullrev:
            raise error.RevlogError(
                _(b"attempted to add linkrev -1 to %s") % self.display_id
            )

        if sidedata is None:
            sidedata = {}
        elif sidedata and not self.hassidedata:
            raise error.ProgrammingError(
                _(b"trying to add sidedata to a revlog that doesn't support it")
            )

        if flags:
            node = node or self.hash(text, p1, p2)

        rawtext, validatehash = flagutil.processflagswrite(self, text, flags)

        # If the flag processor modifies the revision data, ignore any provided
        # cachedelta.
        if rawtext != text:
            cachedelta = None

        if len(rawtext) > _maxentrysize:
            raise error.RevlogError(
                _(
                    b"%s: size of %d bytes exceeds maximum revlog storage of 2GiB"
                )
                % (self.display_id, len(rawtext))
            )

        node = node or self.hash(rawtext, p1, p2)
        rev = self.index.get_rev(node)
        if rev is not None:
            return rev

        if validatehash:
            self.checkhash(rawtext, node, p1=p1, p2=p2)

        return self.addrawrevision(
            rawtext,
            transaction,
            link,
            p1,
            p2,
            node,
            flags,
            cachedelta=cachedelta,
            deltacomputer=deltacomputer,
            sidedata=sidedata,
        )

    def addrawrevision(
        self,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        node,
        flags,
        cachedelta=None,
        deltacomputer=None,
        sidedata=None,
    ):
        """add a raw revision with known flags, node and parents
        useful when reusing a revision not stored in this revlog (ex: received
        over the wire, or read from an external bundle).
        """
        with self._writing(transaction):
            return self._addrevision(
                node,
                rawtext,
                transaction,
                link,
                p1,
                p2,
                flags,
                cachedelta,
                deltacomputer=deltacomputer,
                sidedata=sidedata,
            )

    def compress(self, data):
        """Generate a possibly-compressed representation of data."""
        if not data:
            return b'', data

        compressed = self._compressor.compress(data)

        if compressed:
            # The revlog compressor added the header in the returned data.
            return b'', compressed

        if data[0:1] == b'\0':
            return b'', data
        return b'u', data
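
    # Header convention for the 2-tuple returned above (first element is the
    # header, second the payload):
    #
    #   (b'', b'x...')   - engine-compressed; header embedded in the payload
    #   (b'', b'\0...')  - stored raw; the leading NUL already marks it
    #   (b'u', b'...')   - stored raw; the b'u' header is prepended on write
    #
    # decompress() below dispatches on that first byte.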

    def decompress(self, data):
        """Decompress a revlog chunk.

        The chunk is expected to begin with a header identifying the
        format type so it can be routed to an appropriate decompressor.
        """
        if not data:
            return data

        # Revlogs are read much more frequently than they are written and many
        # chunks only take microseconds to decompress, so performance is
        # important here.
        #
        # We can make a few assumptions about revlogs:
        #
        # 1) the majority of chunks will be compressed (as opposed to inline
        #    raw data).
        # 2) decompressing *any* data will likely be at least 10x slower than
        #    returning raw inline data.
        # 3) we want to prioritize common and officially supported compression
        #    engines
        #
        # It follows that we want to optimize for "decompress compressed data
        # when encoded with common and officially supported compression engines"
        # case over "raw data" and "data encoded by less common or non-official
        # compression engines." That is why we have the inline lookup first
        # followed by the compengines lookup.
        #
        # According to `hg perfrevlogchunks`, this is ~0.5% faster for zlib
        # compressed chunks. And this matters for changelog and manifest reads.
        t = data[0:1]

        if t == b'x':
            try:
                return _zlibdecompress(data)
            except zlib.error as e:
                raise error.RevlogError(
                    _(b'revlog decompress error: %s')
                    % stringutil.forcebytestr(e)
                )
        # '\0' is more common than 'u' so it goes first.
        elif t == b'\0':
            return data
        elif t == b'u':
            return util.buffer(data, 1)

        compressor = self._get_decompressor(t)

        return compressor.decompress(data)

    def _addrevision(
        self,
        node,
        rawtext,
        transaction,
        link,
        p1,
        p2,
        flags,
        cachedelta,
        alwayscache=False,
        deltacomputer=None,
        sidedata=None,
    ):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.

        note: "addrevision" takes non-raw text, "_addrevision" takes raw text.

        if "deltacomputer" is not provided or None, a defaultdeltacomputer will
        be used.

        invariants:
        - rawtext is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to each other.
        """
        if node == self.nullid:
            raise error.RevlogError(
                _(b"%s: attempt to add null revision") % self.display_id
            )
        if (
            node == self.nodeconstants.wdirid
            or node in self.nodeconstants.wdirfilenodeids
        ):
            raise error.RevlogError(
                _(b"%s: attempt to add wdir revision") % self.display_id
            )
        if self._writinghandles is None:
            msg = b'adding revision outside `revlog._writing` context'
            raise error.ProgrammingError(msg)

        if self._inline:
            fh = self._writinghandles[0]
        else:
            fh = self._writinghandles[1]

        btext = [rawtext]

        curr = len(self)
        prev = curr - 1

        offset = self._get_data_offset(prev)

        if self._concurrencychecker:
            ifh, dfh, sdfh = self._writinghandles
            # XXX no checking for the sidedata file
            if self._inline:
                # offset is "as if" it were in the .d file, so we need to add on
                # the size of the entry metadata.
                self._concurrencychecker(
                    ifh, self._indexfile, offset + curr * self.index.entry_size
                )
            else:
                # Entries in the .i are a consistent size.
                self._concurrencychecker(
                    ifh, self._indexfile, curr * self.index.entry_size
                )
                self._concurrencychecker(dfh, self._datafile, offset)

        p1r, p2r = self.rev(p1), self.rev(p2)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if rawtext is None:
            # need rawtext size, before changed by flag processors, which is
            # the non-raw size. use revlog explicitly to avoid filelog's extra
            # logic that might remove metadata size.
            textlen = mdiff.patchedsize(
                revlog.size(self, cachedelta[0]), cachedelta[1]
            )
        else:
            textlen = len(rawtext)

        if deltacomputer is None:
            write_debug = None
            if self._debug_delta:
                write_debug = transaction._report
            deltacomputer = deltautil.deltacomputer(
                self, write_debug=write_debug
            )

        if cachedelta is not None and len(cachedelta) == 2:
            # If the cached delta has no information about how it should be
            # reused, add the default reuse instruction according to the
            # revlog's configuration.
            if self._generaldelta and self._lazydeltabase:
                delta_base_reuse = DELTA_BASE_REUSE_TRY
            else:
                delta_base_reuse = DELTA_BASE_REUSE_NO
            cachedelta = (cachedelta[0], cachedelta[1], delta_base_reuse)

        revinfo = revlogutils.revisioninfo(
            node,
            p1,
            p2,
            btext,
            textlen,
            cachedelta,
            flags,
        )

        deltainfo = deltacomputer.finddeltainfo(revinfo, fh)

        compression_mode = COMP_MODE_INLINE
        if self._docket is not None:
            default_comp = self._docket.default_compression_header
            r = deltautil.delta_compression(default_comp, deltainfo)
            compression_mode, deltainfo = r

        sidedata_compression_mode = COMP_MODE_INLINE
        if sidedata and self.hassidedata:
            sidedata_compression_mode = COMP_MODE_PLAIN
            serialized_sidedata = sidedatautil.serialize_sidedata(sidedata)
            sidedata_offset = self._docket.sidedata_end
            h, comp_sidedata = self.compress(serialized_sidedata)
            if (
                h != b'u'
                and comp_sidedata[0:1] != b'\0'
                and len(comp_sidedata) < len(serialized_sidedata)
            ):
                assert not h
                if (
                    comp_sidedata[0:1]
                    == self._docket.default_compression_header
                ):
                    sidedata_compression_mode = COMP_MODE_DEFAULT
                    serialized_sidedata = comp_sidedata
                else:
                    sidedata_compression_mode = COMP_MODE_INLINE
                    serialized_sidedata = comp_sidedata
        else:
            serialized_sidedata = b""
            # Don't store the offset if the sidedata is empty, that way
            # we can easily detect empty sidedata and they will be no different
            # than ones we manually add.
            sidedata_offset = 0

        rank = RANK_UNKNOWN
        if self._compute_rank:
            if (p1r, p2r) == (nullrev, nullrev):
                rank = 1
            elif p1r != nullrev and p2r == nullrev:
                rank = 1 + self.fast_rank(p1r)
            elif p1r == nullrev and p2r != nullrev:
                rank = 1 + self.fast_rank(p2r)
            else:  # merge node
                if rustdagop is not None and self.index.rust_ext_compat:
                    rank = rustdagop.rank(self.index, p1r, p2r)
                else:
                    pmin, pmax = sorted((p1r, p2r))
                    rank = 1 + self.fast_rank(pmax)
                    rank += sum(1 for _ in self.findmissingrevs([pmax], [pmin]))

        e = revlogutils.entry(
            flags=flags,
            data_offset=offset,
            data_compressed_length=deltainfo.deltalen,
            data_uncompressed_length=textlen,
            data_compression_mode=compression_mode,
            data_delta_base=deltainfo.base,
            link_rev=link,
            parent_rev_1=p1r,
            parent_rev_2=p2r,
            node_id=node,
            sidedata_offset=sidedata_offset,
            sidedata_compressed_length=len(serialized_sidedata),
            sidedata_compression_mode=sidedata_compression_mode,
            rank=rank,
        )

        self.index.append(e)
        entry = self.index.entry_binary(curr)
        if curr == 0 and self._docket is None:
            header = self._format_flags | self._format_version
            header = self.index.pack_header(header)
            entry = header + entry
        self._writeentry(
            transaction,
            entry,
            deltainfo.data,
            link,
            offset,
            serialized_sidedata,
            sidedata_offset,
        )

        rawtext = btext[0]

        if alwayscache and rawtext is None:
            rawtext = deltacomputer.buildtext(revinfo, fh)

        if type(rawtext) == bytes:  # only accept immutable objects
            self._revisioncache = (node, curr, rawtext)
        self._chainbasecache[curr] = deltainfo.chainbase
        return curr

    def _get_data_offset(self, prev):
        """Returns the current offset in the (in-transaction) data file.

        Versions < 2 of the revlog can compute this in O(1), while revlog v2
        needs a docket file to store that information: since sidedata can be
        rewritten to the end of the data file within a transaction, you can
        have cases where, for example, rev `n` does not have sidedata while
        rev `n - 1` does, leading to `n - 1`'s sidedata being written after
        `n`'s data.

        TODO cache this in a docket file before getting out of experimental."""
        if self._docket is None:
            return self.end(prev)
        else:
            return self._docket.data_end
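
    # Editorial note: for pre-v2 revlogs the next write offset is simply
    # end(prev) == start(prev) + length(prev), hence the O(1) path above;
    # revlog v2 has to consult the docket because sidedata rewrites can move
    # the logical end of the data file within a transaction.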

    def _writeentry(
        self, transaction, entry, data, link, offset, sidedata, sidedata_offset
    ):
        # Files opened in a+ mode have inconsistent behavior on various
        # platforms. Windows requires that a file positioning call be made
        # when the file handle transitions between reads and writes. See
        # 3686fa2b8eee and the mixedfilemodewrapper in windows.py. On other
        # platforms, Python or the platform itself can be buggy. Some versions
        # of Solaris have been observed to not append at the end of the file
        # if the file was seeked to before the end. See issue4943 for more.
        #
        # We work around this issue by inserting a seek() before writing.
        # Note: This is likely not necessary on Python 3. However, because
        # the file handle is reused for reads and may be seeked there, we need
        # to be careful before changing this.
        if self._writinghandles is None:
            msg = b'adding revision outside `revlog._writing` context'
            raise error.ProgrammingError(msg)
        ifh, dfh, sdfh = self._writinghandles
        if self._docket is None:
            ifh.seek(0, os.SEEK_END)
        else:
            ifh.seek(self._docket.index_end, os.SEEK_SET)
        if dfh:
            if self._docket is None:
                dfh.seek(0, os.SEEK_END)
            else:
                dfh.seek(self._docket.data_end, os.SEEK_SET)
        if sdfh:
            sdfh.seek(self._docket.sidedata_end, os.SEEK_SET)

        curr = len(self) - 1
        if not self._inline:
            transaction.add(self._datafile, offset)
            if self._sidedatafile:
                transaction.add(self._sidedatafile, sidedata_offset)
            transaction.add(self._indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            if sidedata:
                sdfh.write(sidedata)
            ifh.write(entry)
        else:
            offset += curr * self.index.entry_size
            transaction.add(self._indexfile, offset)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            assert not sidedata
            self._enforceinlinesize(transaction)
        if self._docket is not None:
            # revlog-v2 always has 3 writing handles, help Pytype
            wh1 = self._writinghandles[0]
            wh2 = self._writinghandles[1]
            wh3 = self._writinghandles[2]
            assert wh1 is not None
            assert wh2 is not None
            assert wh3 is not None
            self._docket.index_end = wh1.tell()
            self._docket.data_end = wh2.tell()
            self._docket.sidedata_end = wh3.tell()

        nodemaputil.setup_persistent_nodemap(transaction, self)
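
    # Layout sketch (editorial, for orientation): the two write branches
    # above correspond to the two on-disk layouts a revlog can use:
    #
    #     split:   .i -> [entry 0][entry 1]...       (fixed-size entries)
    #              .d -> [data 0][data 1]...         (variable-size chunks)
    #     inline:  .i -> [entry 0][data 0][entry 1][data 1]...
    #
    # which is why the inline branch grows `offset` by the accumulated
    # index-entry bytes before journaling the index write.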

    def addgroup(
        self,
        deltas,
        linkmapper,
        transaction,
        alwayscache=False,
        addrevisioncb=None,
        duplicaterevisioncb=None,
        debug_info=None,
        delta_base_reuse_policy=None,
    ):
        """
        add a delta group

        Given a set of deltas, add them to the revision log. The first
        delta is against its parent, which should be in our log; the rest
        are against the previous delta.

        If ``addrevisioncb`` is defined, it will be called with arguments of
        this revlog and the node that was added.
        """

        if self._adding_group:
            raise error.ProgrammingError(b'cannot nest addgroup() calls')

        # read the default delta-base reuse policy from revlog config if the
        # group did not specify one.
        if delta_base_reuse_policy is None:
            if self._generaldelta and self._lazydeltabase:
                delta_base_reuse_policy = DELTA_BASE_REUSE_TRY
            else:
                delta_base_reuse_policy = DELTA_BASE_REUSE_NO

        self._adding_group = True
        empty = True
        try:
            with self._writing(transaction):
                write_debug = None
                if self._debug_delta:
                    write_debug = transaction._report
                deltacomputer = deltautil.deltacomputer(
                    self,
                    write_debug=write_debug,
                    debug_info=debug_info,
                )
                # loop through our set of deltas
                for data in deltas:
                    (
                        node,
                        p1,
                        p2,
                        linknode,
                        deltabase,
                        delta,
                        flags,
                        sidedata,
                    ) = data
                    link = linkmapper(linknode)
                    flags = flags or REVIDX_DEFAULT_FLAGS

                    rev = self.index.get_rev(node)
                    if rev is not None:
                        # this can happen if two branches make the same change
                        self._nodeduplicatecallback(transaction, rev)
                        if duplicaterevisioncb:
                            duplicaterevisioncb(self, rev)
                        empty = False
                        continue

                    for p in (p1, p2):
                        if not self.index.has_node(p):
                            raise error.LookupError(
                                p, self.radix, _(b'unknown parent')
                            )

                    if not self.index.has_node(deltabase):
                        raise error.LookupError(
                            deltabase, self.display_id, _(b'unknown delta base')
                        )

                    baserev = self.rev(deltabase)

                    if baserev != nullrev and self.iscensored(baserev):
                        # if base is censored, delta must be full replacement in a
                        # single patch operation
                        hlen = struct.calcsize(b">lll")
                        oldlen = self.rawsize(baserev)
                        newlen = len(delta) - hlen
                        if delta[:hlen] != mdiff.replacediffheader(
                            oldlen, newlen
                        ):
                            raise error.CensoredBaseError(
                                self.display_id, self.node(baserev)
                            )

                    if not flags and self._peek_iscensored(baserev, delta):
                        flags |= REVIDX_ISCENSORED

                    # We assume consumers of addrevisioncb will want to retrieve
                    # the added revision, which will require a call to
                    # revision(). revision() will fast path if there is a cache
                    # hit. So, we tell _addrevision() to always cache in this case.
                    # We're only using addgroup() in the context of changegroup
                    # generation so the revision data can always be handled as raw
                    # by the flagprocessor.
                    rev = self._addrevision(
                        node,
                        None,
                        transaction,
                        link,
                        p1,
                        p2,
                        flags,
                        (baserev, delta, delta_base_reuse_policy),
                        alwayscache=alwayscache,
                        deltacomputer=deltacomputer,
                        sidedata=sidedata,
                    )

                    if addrevisioncb:
                        addrevisioncb(self, rev)
                    empty = False
        finally:
            self._adding_group = False
        return not empty
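
    # Illustrative sketch of a minimal caller (hypothetical helper, not part
    # of this module), to make the expected shape of ``deltas`` concrete:
    #
    #     def apply_group(rl, tr, deltas):
    #         # every item unpacks to the 8-tuple consumed above:
    #         # (node, p1, p2, linknode, deltabase, delta, flags, sidedata)
    #         linkmapper = rl.rev  # placeholder; real callers map changelog nodes
    #         return rl.addgroup(deltas, linkmapper, tr)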

    def iscensored(self, rev):
        """Check if a file revision is censored."""
        if not self._censorable:
            return False

        return self.flags(rev) & REVIDX_ISCENSORED

    def _peek_iscensored(self, baserev, delta):
        """Quickly check if a delta produces a censored revision."""
        if not self._censorable:
            return False

        return storageutil.deltaiscensored(delta, baserev, self.rawsize)

    def getstrippoint(self, minlink):
        """find the minimum rev that must be stripped to strip the linkrev

        Returns a tuple containing the minimum rev and a set of all revs that
        have linkrevs that will be broken by this strip.
        """
        return storageutil.resolvestripinfo(
            minlink,
            len(self) - 1,
            self.headrevs(),
            self.linkrev,
            self.parentrevs,
        )

    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll re-add them after this truncation.
        """
        if len(self) == 0:
            return

        rev, _ = self.getstrippoint(minlink)
        if rev == len(self):
            return

        # first truncate the files on disk
        data_end = self.start(rev)
        if not self._inline:
            transaction.add(self._datafile, data_end)
            end = rev * self.index.entry_size
        else:
            end = data_end + (rev * self.index.entry_size)

        if self._sidedatafile:
            sidedata_end = self.sidedata_cut_off(rev)
            transaction.add(self._sidedatafile, sidedata_end)

        transaction.add(self._indexfile, end)
        if self._docket is not None:
            # XXX we could leverage the docket while stripping. However it is
            # not powerful enough at the time of this comment
            self._docket.index_end = end
            self._docket.data_end = data_end
            self._docket.sidedata_end = sidedata_end
            self._docket.write(transaction, stripping=True)

        # then reset internal state in memory to forget those revisions
        self._revisioncache = None
        self._chaininfocache = util.lrucachedict(500)
        self._segmentfile.clear_cache()
        self._segmentfile_sidedata.clear_cache()

        del self.index[rev:-1]
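
    # Editorial note on the truncation arithmetic above: for an inline
    # revlog, index entries and revision data share a single file, so the
    # cut point combines both:
    #
    #     end = start(rev) + rev * index.entry_size
    #
    # i.e. all data bytes belonging to revisions before ``rev`` plus the
    # ``rev`` fixed-size index entries that precede the cut.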

    def checksize(self):
        """Check size of index and data files

        return a (dd, di) tuple.
        - dd: extra bytes for the "data" file
        - di: extra bytes for the "index" file

        A healthy revlog will return (0, 0).
        """
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            with self._datafp() as f:
                f.seek(0, io.SEEK_END)
                actual = f.tell()
            dd = actual - expected
        except FileNotFoundError:
            dd = 0

        try:
            f = self.opener(self._indexfile)
            f.seek(0, io.SEEK_END)
            actual = f.tell()
            f.close()
            s = self.index.entry_size
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except FileNotFoundError:
            di = 0

        return (dd, di)
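
    # Usage sketch (editorial, hypothetical caller): the returned tuple is
    # easy to act on in a diagnostic script:
    #
    #     dd, di = rl.checksize()
    #     if dd or di:
    #         print('damaged revlog: %d stray data bytes, '
    #               '%d stray index bytes' % (dd, di))
    #
    # verifyintegrity() below reports exactly these two conditions.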

    def files(self):
        res = [self._indexfile]
        if self._docket_file is None:
            if not self._inline:
                res.append(self._datafile)
        else:
            res.append(self._docket_file)
            res.extend(self._docket.old_index_filepaths(include_empty=False))
            if self._docket.data_end:
                res.append(self._datafile)
            res.extend(self._docket.old_data_filepaths(include_empty=False))
            if self._docket.sidedata_end:
                res.append(self._sidedatafile)
            res.extend(self._docket.old_sidedata_filepaths(include_empty=False))
        return res
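
    # Editorial note: files() is the file list that storedsize accounting in
    # storageinfo() below iterates over, e.g. (sketch):
    #
    #     total = sum(rl.opener.stat(p).st_size for p in rl.files())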

    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=repository.CG_DELTAMODE_STD,
        sidedata_helpers=None,
        debug_info=None,
    ):
        if nodesorder not in (b'nodes', b'storage', b'linear', None):
            raise error.ProgrammingError(
                b'unhandled value for nodesorder: %s' % nodesorder
            )

        if nodesorder is None and not self._generaldelta:
            nodesorder = b'storage'

        if (
            not self._storedeltachains
            and deltamode != repository.CG_DELTAMODE_PREV
        ):
            deltamode = repository.CG_DELTAMODE_FULL

        return storageutil.emitrevisions(
            self,
            nodes,
            nodesorder,
            revlogrevisiondelta,
            deltaparentfn=self.deltaparent,
            candeltafn=self.candelta,
            rawsizefn=self.rawsize,
            revdifffn=self.revdiff,
            flagsfn=self.flags,
            deltamode=deltamode,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            sidedata_helpers=sidedata_helpers,
            debug_info=debug_info,
        )

    DELTAREUSEALWAYS = b'always'
    DELTAREUSESAMEREVS = b'samerevs'
    DELTAREUSENEVER = b'never'

    DELTAREUSEFULLADD = b'fulladd'

    DELTAREUSEALL = {b'always', b'samerevs', b'never', b'fulladd'}

    def clone(
        self,
        tr,
        destrevlog,
        addrevisioncb=None,
        deltareuse=DELTAREUSESAMEREVS,
        forcedeltabothparents=None,
        sidedata_helpers=None,
    ):
        """Copy this revlog to another, possibly with format changes.

        The destination revlog will contain the same revisions and nodes.
        However, it may not be bit-for-bit identical due to e.g. delta encoding
        differences.

        The ``deltareuse`` argument controls how deltas from the existing
        revlog are preserved in the destination revlog. The argument can have
        the following values:

        DELTAREUSEALWAYS
          Deltas will always be reused (if possible), even if the destination
          revlog would not select the same revisions for the delta. This is the
          fastest mode of operation.
        DELTAREUSESAMEREVS
          Deltas will be reused if the destination revlog would pick the same
          revisions for the delta. This mode strikes a balance between speed
          and optimization.
        DELTAREUSENEVER
          Deltas will never be reused. This is the slowest mode of execution.
          This mode can be used to recompute deltas (e.g. if the diff/delta
          algorithm changes).
        DELTAREUSEFULLADD
          Revisions will be re-added as if they were new content. This is
          slower than DELTAREUSEALWAYS but allows more mechanisms to kick in,
          e.g. large file detection and handling.

        Delta computation can be slow, so the choice of delta reuse policy can
        significantly affect run time.

        The default policy (``DELTAREUSESAMEREVS``) strikes a balance between
        two extremes. Deltas will be reused if they are appropriate. But if the
        delta could choose a better revision, it will do so. This means if you
        are converting a non-generaldelta revlog to a generaldelta revlog,
        deltas will be recomputed if the delta's parent isn't a parent of the
        revision.

        In addition to the delta policy, the ``forcedeltabothparents``
        argument controls whether to force computing deltas against both
        parents for merges. When left unset, the destination revlog's current
        setting is used.

        See `revlogutil.sidedata.get_sidedata_helpers` for the doc on
        `sidedata_helpers`.
        """
        if deltareuse not in self.DELTAREUSEALL:
            raise ValueError(
                _(b'value for deltareuse invalid: %s') % deltareuse
            )

        if len(destrevlog):
            raise ValueError(_(b'destination revlog is not empty'))

        if getattr(self, 'filteredrevs', None):
            raise ValueError(_(b'source revlog has filtered revisions'))
        if getattr(destrevlog, 'filteredrevs', None):
            raise ValueError(_(b'destination revlog has filtered revisions'))

        # lazydelta and lazydeltabase control whether to reuse a cached delta,
        # if possible.
        oldlazydelta = destrevlog._lazydelta
        oldlazydeltabase = destrevlog._lazydeltabase
        oldamd = destrevlog._deltabothparents

        try:
            if deltareuse == self.DELTAREUSEALWAYS:
                destrevlog._lazydeltabase = True
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSESAMEREVS:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = True
            elif deltareuse == self.DELTAREUSENEVER:
                destrevlog._lazydeltabase = False
                destrevlog._lazydelta = False

            destrevlog._deltabothparents = forcedeltabothparents or oldamd

            self._clone(
                tr,
                destrevlog,
                addrevisioncb,
                deltareuse,
                forcedeltabothparents,
                sidedata_helpers,
            )

        finally:
            destrevlog._lazydelta = oldlazydelta
            destrevlog._lazydeltabase = oldlazydeltabase
            destrevlog._deltabothparents = oldamd
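
    # Usage sketch (editorial, hypothetical names): a format-conversion pass
    # could drive clone() like this, forcing every delta to be recomputed
    # with the destination revlog's own settings:
    #
    #     with repo.transaction(b'rewrite-revlog') as tr:
    #         src.clone(tr, dst, deltareuse=src.DELTAREUSENEVER)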

    def _clone(
        self,
        tr,
        destrevlog,
        addrevisioncb,
        deltareuse,
        forcedeltabothparents,
        sidedata_helpers,
    ):
        """perform the core duty of `revlog.clone` after parameter processing"""
        write_debug = None
        if self._debug_delta:
            write_debug = tr._report
        deltacomputer = deltautil.deltacomputer(
            destrevlog,
            write_debug=write_debug,
        )
        index = self.index
        for rev in self:
            entry = index[rev]

            # Some classes override linkrev to take filtered revs into
            # account. Use raw entry from index.
            flags = entry[0] & 0xFFFF
            linkrev = entry[4]
            p1 = index[entry[5]][7]
            p2 = index[entry[6]][7]
            node = entry[7]

            # (Possibly) reuse the delta from the revlog if allowed and
            # the revlog chunk is a delta.
            cachedelta = None
            rawtext = None
            if deltareuse == self.DELTAREUSEFULLADD:
                text = self._revisiondata(rev)
                sidedata = self.sidedata(rev)

                if sidedata_helpers is not None:
                    (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
                        self, sidedata_helpers, sidedata, rev
                    )
                    flags = flags | new_flags[0] & ~new_flags[1]

                destrevlog.addrevision(
                    text,
                    tr,
                    linkrev,
                    p1,
                    p2,
                    cachedelta=cachedelta,
                    node=node,
                    flags=flags,
                    deltacomputer=deltacomputer,
                    sidedata=sidedata,
                )
            else:
                if destrevlog._lazydelta:
                    dp = self.deltaparent(rev)
                    if dp != nullrev:
                        cachedelta = (dp, bytes(self._chunk(rev)))

                sidedata = None
                if not cachedelta:
                    rawtext = self._revisiondata(rev)
                    sidedata = self.sidedata(rev)
                if sidedata is None:
                    sidedata = self.sidedata(rev)

                if sidedata_helpers is not None:
                    (sidedata, new_flags) = sidedatautil.run_sidedata_helpers(
                        self, sidedata_helpers, sidedata, rev
                    )
                    flags = flags | new_flags[0] & ~new_flags[1]

                with destrevlog._writing(tr):
                    destrevlog._addrevision(
                        node,
                        rawtext,
                        tr,
                        linkrev,
                        p1,
                        p2,
                        flags,
                        cachedelta,
                        deltacomputer=deltacomputer,
                        sidedata=sidedata,
                    )

            if addrevisioncb:
                addrevisioncb(self, rev, node)

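    # Editorial note on the reuse path above: when the destination allows
    # lazy deltas, the raw on-disk chunk is forwarded as (deltaparent, chunk)
    # and the revision text is never rebuilt; a full text is only
    # materialized when the stored chunk is not a delta.
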
    def censorrevision(self, tr, censornode, tombstone=b''):
        if self._format_version == REVLOGV0:
            raise error.RevlogError(
                _(b'cannot censor with version %d revlogs')
                % self._format_version
            )
        elif self._format_version == REVLOGV1:
            rewrite.v1_censor(self, tr, censornode, tombstone)
        else:
            rewrite.v2_censor(self, tr, censornode, tombstone)

    def verifyintegrity(self, state):
        """Verifies the integrity of the revlog.

        Yields ``revlogproblem`` instances describing problems that are
        found.
        """
        dd, di = self.checksize()
        if dd:
            yield revlogproblem(error=_(b'data length off by %d bytes') % dd)
        if di:
            yield revlogproblem(error=_(b'index contains %d extra bytes') % di)

        version = self._format_version

        # The verifier tells us what version revlog we should be.
        if version != state[b'expectedversion']:
            yield revlogproblem(
                warning=_(b"warning: '%s' uses revlog format %d; expected %d")
                % (self.display_id, version, state[b'expectedversion'])
            )

        state[b'skipread'] = set()
        state[b'safe_renamed'] = set()

        for rev in self:
            node = self.node(rev)

            # Verify contents. 4 cases to care about:
            #
            #   common: the most common case
            #   rename: with a rename
            #   meta: file content starts with b'\1\n', the metadata
            #         header defined in filelog.py, but without a rename
            #   ext: content stored externally
            #
            # More formally, their differences are shown below:
            #
            #                       | common | rename | meta  | ext
            #  -------------------------------------------------------
            #  flags()              | 0      | 0      | 0     | not 0
            #  renamed()            | False  | True   | False | ?
            #  rawtext[0:2]=='\1\n' | False  | True   | True  | ?
            #
            # "rawtext" means the raw text stored in revlog data, which
            # could be retrieved by "rawdata(rev)". "text"
            # mentioned below is "revision(rev)".
            #
            # There are 3 different lengths stored physically:
            #  1. L1: rawsize, stored in revlog index
            #  2. L2: len(rawtext), stored in revlog data
            #  3. L3: len(text), stored in revlog data if flags==0, or
            #     possibly somewhere else if flags!=0
            #
            # L1 should be equal to L2. L3 could be different from them.
            # "text" may or may not affect commit hash depending on flag
            # processors (see flagutil.addflagprocessor).
            #
            #              | common | rename | meta  | ext
            # -------------------------------------------------
            # rawsize()    | L1     | L1     | L1    | L1
            # size()       | L1     | L2-LM  | L1(*) | L1 (?)
            # len(rawtext) | L2     | L2     | L2    | L2
            # len(text)    | L2     | L2     | L2    | L3
            # len(read())  | L2     | L2-LM  | L2-LM | L3 (?)
            #
            # LM:  length of metadata, depending on rawtext
            # (*): not ideal, see comment in filelog.size
            # (?): could be "- len(meta)" if the resolved content has
            #      rename metadata
            #
            # Checks needed to be done:
            #  1. length check: L1 == L2, in all cases.
            #  2. hash check: depending on flag processor, we may need to
            #     use either "text" (external), or "rawtext" (in revlog).

            try:
                skipflags = state.get(b'skipflags', 0)
                if skipflags:
                    skipflags &= self.flags(rev)

                _verify_revision(self, skipflags, state, node)

                l1 = self.rawsize(rev)
                l2 = len(self.rawdata(node))

                if l1 != l2:
                    yield revlogproblem(
                        error=_(b'unpacked size is %d, %d expected') % (l2, l1),
                        node=node,
                    )

            except error.CensoredNodeError:
                if state[b'erroroncensored']:
                    yield revlogproblem(
                        error=_(b'censored file data'), node=node
                    )
                state[b'skipread'].add(node)
            except Exception as e:
                yield revlogproblem(
                    error=_(b'unpacking %s: %s')
                    % (short(node), stringutil.forcebytestr(e)),
                    node=node,
                )
                state[b'skipread'].add(node)

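    # Consumer sketch (editorial, mirroring what `hg verify` does with these
    # records; `ui` is assumed to be a Mercurial ui object):
    #
    #     for problem in rl.verifyintegrity(state):
    #         if problem.warning:
    #             ui.warn(problem.warning + b'\n')
    #         elif problem.error:
    #             ui.warn(problem.error + b'\n')
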
    def storageinfo(
        self,
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        d = {}

        if exclusivefiles:
            d[b'exclusivefiles'] = [(self.opener, self._indexfile)]
            if not self._inline:
                d[b'exclusivefiles'].append((self.opener, self._datafile))

        if sharedfiles:
            d[b'sharedfiles'] = []

        if revisionscount:
            d[b'revisionscount'] = len(self)

        if trackedsize:
            d[b'trackedsize'] = sum(map(self.rawsize, iter(self)))

        if storedsize:
            d[b'storedsize'] = sum(
                self.opener.stat(path).st_size for path in self.files()
            )

        return d

    def rewrite_sidedata(self, transaction, helpers, startrev, endrev):
        if not self.hassidedata:
            return
        # revlog formats with sidedata support do not support inline
        assert not self._inline
        if not helpers[1] and not helpers[2]:
            # Nothing to generate or remove
            return

        new_entries = []
        # append the new sidedata
        with self._writing(transaction):
            ifh, dfh, sdfh = self._writinghandles
            dfh.seek(self._docket.sidedata_end, os.SEEK_SET)

            current_offset = sdfh.tell()
            for rev in range(startrev, endrev + 1):
                entry = self.index[rev]
                new_sidedata, flags = sidedatautil.run_sidedata_helpers(
                    store=self,
                    sidedata_helpers=helpers,
                    sidedata={},
                    rev=rev,
                )

                serialized_sidedata = sidedatautil.serialize_sidedata(
                    new_sidedata
                )

                sidedata_compression_mode = COMP_MODE_INLINE
                if serialized_sidedata and self.hassidedata:
                    sidedata_compression_mode = COMP_MODE_PLAIN
                    h, comp_sidedata = self.compress(serialized_sidedata)
                    if (
                        h != b'u'
                        and comp_sidedata[0] != b'\0'
                        and len(comp_sidedata) < len(serialized_sidedata)
                    ):
                        assert not h
                        if (
                            comp_sidedata[0]
                            == self._docket.default_compression_header
                        ):
                            sidedata_compression_mode = COMP_MODE_DEFAULT
                            serialized_sidedata = comp_sidedata
                        else:
                            sidedata_compression_mode = COMP_MODE_INLINE
                            serialized_sidedata = comp_sidedata
                if entry[8] != 0 or entry[9] != 0:
                    # rewriting entries that already have sidedata is not
                    # supported yet, because it introduces garbage data in the
                    # revlog.
                    msg = b"rewriting existing sidedata is not supported yet"
                    raise error.Abort(msg)

                # Apply (potential) flags to add and to remove after running
                # the sidedata helpers
                new_offset_flags = entry[0] | flags[0] & ~flags[1]
                entry_update = (
                    current_offset,
                    len(serialized_sidedata),
                    new_offset_flags,
                    sidedata_compression_mode,
                )

                # the sidedata computation might have moved the file cursors
                # around
                sdfh.seek(current_offset, os.SEEK_SET)
                sdfh.write(serialized_sidedata)
                new_entries.append(entry_update)
                current_offset += len(serialized_sidedata)
            self._docket.sidedata_end = sdfh.tell()

            # rewrite the new index entries
            ifh.seek(startrev * self.index.entry_size)
            for i, e in enumerate(new_entries):
                rev = startrev + i
                self.index.replace_sidedata_info(rev, *e)
                packed = self.index.entry_binary(rev)
                if rev == 0 and self._docket is None:
                    header = self._format_flags | self._format_version
                    header = self.index.pack_header(header)
                    packed = header + packed
                ifh.write(packed)
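
    # Editorial note on the mode selection above: sidedata is roughly kept
    # uncompressed (COMP_MODE_PLAIN) unless compression actually shrinks the
    # chunk; when it does, COMP_MODE_DEFAULT is chosen if the compression
    # header matches the docket-wide default (the header byte need not be
    # stored per chunk), and COMP_MODE_INLINE otherwise (the header byte is
    # kept with the chunk).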
@@ -1,982 +1,994 b''
#require serve no-reposimplestore no-chg

#testcases stream-legacy stream-bundle2-v2 stream-bundle2-v3

#if stream-legacy
  $ cat << EOF >> $HGRCPATH
  > [server]
  > bundle2.stream = no
  > EOF
#endif
#if stream-bundle2-v3
  $ cat << EOF >> $HGRCPATH
  > [experimental]
  > stream-v3 = yes
  > EOF
#endif

Initialize repository

  $ hg init server
  $ cd server
  $ sh $TESTDIR/testlib/stream_clone_setup.sh
  adding 00changelog-ab349180a0405010.nd
  adding 00changelog.d
  adding 00changelog.i
  adding 00changelog.n
  adding 00manifest.d
  adding 00manifest.i
  adding container/isam-build-centos7/bazel-coverage-generator-sandboxfs-compatibility-0758e3e4f6057904d44399bd666faba9e7f40686.patch
  adding data/foo.d
  adding data/foo.i
  adding data/foo.n
  adding data/undo.babar
  adding data/undo.d
  adding data/undo.foo.d
  adding data/undo.foo.i
  adding data/undo.foo.n
  adding data/undo.i
  adding data/undo.n
  adding data/undo.py
  adding foo.d
  adding foo.i
  adding foo.n
  adding meta/foo.d
  adding meta/foo.i
  adding meta/foo.n
  adding meta/undo.babar
  adding meta/undo.d
  adding meta/undo.foo.d
  adding meta/undo.foo.i
  adding meta/undo.foo.n
  adding meta/undo.i
  adding meta/undo.n
  adding meta/undo.py
  adding savanah/foo.d
  adding savanah/foo.i
  adding savanah/foo.n
  adding savanah/undo.babar
  adding savanah/undo.d
  adding savanah/undo.foo.d
  adding savanah/undo.foo.i
  adding savanah/undo.foo.n
  adding savanah/undo.i
  adding savanah/undo.n
  adding savanah/undo.py
  adding store/C\xc3\xa9lesteVille_is_a_Capital_City (esc)
  adding store/foo.d
  adding store/foo.i
  adding store/foo.n
  adding store/undo.babar
  adding store/undo.d
  adding store/undo.foo.d
  adding store/undo.foo.i
  adding store/undo.foo.n
  adding store/undo.i
  adding store/undo.n
  adding store/undo.py
  adding undo.babar
  adding undo.d
  adding undo.foo.d
  adding undo.foo.i
  adding undo.foo.n
  adding undo.i
  adding undo.n
  adding undo.py

  $ hg --config server.uncompressed=false serve -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid > $DAEMON_PIDS
  $ cd ..

Check local clone
==================

The logic is close enough to the uncompressed case.
This is present here to reuse the testing around files with "special" names.

  $ hg clone server local-clone
  updating to branch default
  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved

Check that the clone went well

  $ hg verify -R local-clone -q

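(`hg verify -q` prints nothing when the repository is consistent, so the
absence of output after the command above is itself the assertion.)
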
105 Check uncompressed
105 Check uncompressed
106 ==================
106 ==================
107
107
108 Cannot stream clone when server.uncompressed is set
108 Cannot stream clone when server.uncompressed is set
109
109
110 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out'
110 $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=stream_out'
111 200 Script output follows
111 200 Script output follows
112
112
113 1
113 1
114
114
#if stream-legacy
  $ hg debugcapabilities http://localhost:$HGPORT
  Main capabilities:
    batch
    branchmap
    $USUAL_BUNDLE2_CAPS_SERVER$
    changegroupsubset
    compression=$BUNDLE2_COMPRESSIONS$
    getbundle
    httpheader=1024
    httpmediatype=0.1rx,0.1tx,0.2tx
    known
    lookup
    pushkey
    unbundle=HG10GZ,HG10BZ,HG10UN
    unbundlehash
  Bundle2 capabilities:
    HG20
    bookmarks
    changegroup
      01
      02
      03
    checkheads
      related
    digests
      md5
      sha1
      sha512
    error
      abort
      unsupportedcontent
      pushraced
      pushkey
    hgtagsfnodes
    listkeys
    phases
      heads
    pushkey
    remote-changegroup
      http
      https

  $ hg clone --stream -U http://localhost:$HGPORT server-disabled
  warning: stream clone requested but server has them disabled
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 3 changesets with 1088 changes to 1088 files
  new changesets 96ee1d7354c4:5223b5e3265f

  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
  200 Script output follows
  content-type: application/mercurial-0.2


  $ f --size body --hexdump --bytes 100
  body: size=140
  0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
  0010: 73 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |s.ERROR:ABORT...|
  0020: 00 01 01 07 3c 04 16 6d 65 73 73 61 67 65 73 74 |....<..messagest|
  0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
  0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
  0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
  0060: 69 73 20 66 |is f|

#endif
#if stream-bundle2-v2
  $ hg debugcapabilities http://localhost:$HGPORT
  Main capabilities:
    batch
    branchmap
    $USUAL_BUNDLE2_CAPS_SERVER$
    changegroupsubset
    compression=$BUNDLE2_COMPRESSIONS$
    getbundle
    httpheader=1024
    httpmediatype=0.1rx,0.1tx,0.2tx
    known
    lookup
    pushkey
    unbundle=HG10GZ,HG10BZ,HG10UN
    unbundlehash
  Bundle2 capabilities:
    HG20
    bookmarks
    changegroup
      01
      02
      03
    checkheads
      related
    digests
      md5
      sha1
      sha512
    error
      abort
      unsupportedcontent
      pushraced
      pushkey
    hgtagsfnodes
    listkeys
    phases
      heads
    pushkey
    remote-changegroup
      http
      https

  $ hg clone --stream -U http://localhost:$HGPORT server-disabled
  warning: stream clone requested but server has them disabled
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 3 changesets with 1088 changes to 1088 files
  new changesets 96ee1d7354c4:5223b5e3265f

  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
  200 Script output follows
  content-type: application/mercurial-0.2


  $ f --size body --hexdump --bytes 100
  body: size=140
  0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
  0010: 73 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |s.ERROR:ABORT...|
  0020: 00 01 01 07 3c 04 16 6d 65 73 73 61 67 65 73 74 |....<..messagest|
  0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
  0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
  0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
  0060: 69 73 20 66 |is f|

#endif
#if stream-bundle2-v3
  $ hg debugcapabilities http://localhost:$HGPORT
  Main capabilities:
    batch
    branchmap
    $USUAL_BUNDLE2_CAPS_SERVER$
    changegroupsubset
    compression=$BUNDLE2_COMPRESSIONS$
    getbundle
    httpheader=1024
    httpmediatype=0.1rx,0.1tx,0.2tx
    known
    lookup
    pushkey
    unbundle=HG10GZ,HG10BZ,HG10UN
    unbundlehash
  Bundle2 capabilities:
    HG20
    bookmarks
    changegroup
      01
      02
      03
    checkheads
      related
    digests
      md5
      sha1
      sha512
    error
      abort
      unsupportedcontent
      pushraced
      pushkey
    hgtagsfnodes
    listkeys
    phases
      heads
    pushkey
    remote-changegroup
      http
      https

  $ hg clone --stream -U http://localhost:$HGPORT server-disabled
  warning: stream clone requested but server has them disabled
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 3 changesets with 1088 changes to 1088 files
  new changesets 96ee1d7354c4:5223b5e3265f

  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto 0.2 --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
  200 Script output follows
  content-type: application/mercurial-0.2


  $ f --size body --hexdump --bytes 100
  body: size=140
  0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
  0010: 73 0b 45 52 52 4f 52 3a 41 42 4f 52 54 00 00 00 |s.ERROR:ABORT...|
  0020: 00 01 01 07 3c 04 16 6d 65 73 73 61 67 65 73 74 |....<..messagest|
  0030: 72 65 61 6d 20 64 61 74 61 20 72 65 71 75 65 73 |ream data reques|
  0040: 74 65 64 20 62 75 74 20 73 65 72 76 65 72 20 64 |ted but server d|
  0050: 6f 65 73 20 6e 6f 74 20 61 6c 6c 6f 77 20 74 68 |oes not allow th|
  0060: 69 73 20 66 |is f|

#endif

  $ killdaemons.py
  $ cd server
  $ hg serve -p $HGPORT -d --pid-file=hg.pid --error errors.txt
  $ cat hg.pid > $DAEMON_PIDS
  $ cd ..

Basic clone

#if stream-legacy
  $ hg clone --stream -U http://localhost:$HGPORT clone1
  streaming all changes
  1090 files to transfer, 102 KB of data (no-zstd !)
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  1090 files to transfer, 98.8 KB of data (zstd !)
  transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
  searching for changes
  no changes found
  $ cat server/errors.txt
#endif
#if stream-bundle2-v2
  $ hg clone --stream -U http://localhost:$HGPORT clone1
  streaming all changes
  1093 files to transfer, 102 KB of data (no-zstd !)
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  1093 files to transfer, 98.9 KB of data (zstd !)
  transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)

  $ ls -1 clone1/.hg/cache
  branch2-base
  branch2-immutable
  branch2-served
  branch2-served.hidden
  branch2-visible
  branch2-visible-hidden
  rbc-names-v1
  rbc-revs-v1
  tags2
  tags2-served
  $ cat server/errors.txt
#endif
#if stream-bundle2-v3
  $ hg clone --stream -U http://localhost:$HGPORT clone1
  streaming all changes
  1093 entries to transfer
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)

  $ ls -1 clone1/.hg/cache
  branch2-base
  branch2-immutable
  branch2-served
  branch2-served.hidden
  branch2-visible
  branch2-visible-hidden
  rbc-names-v1
  rbc-revs-v1
  tags2
  tags2-served
  $ cat server/errors.txt
#endif

getbundle requests with stream=1 are uncompressed: the body below starts by
naming the "none" compression engine (see the sketch after the hexdump
blocks)

  $ get-with-headers.py $LOCALIP:$HGPORT '?cmd=getbundle' content-type --bodyfile body --hgproto '0.1 0.2 comp=zlib,none' --requestheader "x-hgarg-1=bundlecaps=HG20%2Cbundle2%3DHG20%250Abookmarks%250Achangegroup%253D01%252C02%252C03%250Adigests%253Dmd5%252Csha1%252Csha512%250Aerror%253Dabort%252Cunsupportedcontent%252Cpushraced%252Cpushkey%250Ahgtagsfnodes%250Alistkeys%250Aphases%253Dheads%250Apushkey%250Aremote-changegroup%253Dhttp%252Chttps%250Astream%253Dv2&cg=0&common=0000000000000000000000000000000000000000&heads=c17445101a72edac06facd130d14808dfbd5c7c2&stream=1"
  200 Script output follows
  content-type: application/mercurial-0.2


#if no-zstd no-rust
  $ f --size --hex --bytes 256 body
  body: size=119123
  0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
  0010: 62 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |b.STREAM2.......|
  0020: 06 09 04 0c 26 62 79 74 65 63 6f 75 6e 74 31 30 |....&bytecount10|
  0030: 34 31 31 35 66 69 6c 65 63 6f 75 6e 74 31 30 39 |4115filecount109|
  0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |3requirementsgen|
  0050: 65 72 61 6c 64 65 6c 74 61 25 32 43 72 65 76 6c |eraldelta%2Crevl|
  0060: 6f 67 76 31 25 32 43 73 70 61 72 73 65 72 65 76 |ogv1%2Csparserev|
  0070: 6c 6f 67 00 00 80 00 73 08 42 64 61 74 61 2f 30 |log....s.Bdata/0|
  0080: 2e 69 00 03 00 01 00 00 00 00 00 00 00 02 00 00 |.i..............|
  0090: 00 01 00 00 00 00 00 00 00 01 ff ff ff ff ff ff |................|
  00a0: ff ff 80 29 63 a0 49 d3 23 87 bf ce fe 56 67 92 |...)c.I.#....Vg.|
  00b0: 67 2c 69 d1 ec 39 00 00 00 00 00 00 00 00 00 00 |g,i..9..........|
  00c0: 00 00 75 30 73 26 45 64 61 74 61 2f 30 30 63 68 |..u0s&Edata/00ch|
  00d0: 61 6e 67 65 6c 6f 67 2d 61 62 33 34 39 31 38 30 |angelog-ab349180|
  00e0: 61 30 34 30 35 30 31 30 2e 6e 64 2e 69 00 03 00 |a0405010.nd.i...|
  00f0: 01 00 00 00 00 00 00 00 05 00 00 00 04 00 00 00 |................|
#endif
#if zstd no-rust
  $ f --size --hex --bytes 256 body
  body: size=116310 (no-bigendian !)
  body: size=116305 (bigendian !)
  0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
  0010: 7c 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 ||.STREAM2.......|
  0020: 06 09 04 0c 40 62 79 74 65 63 6f 75 6e 74 31 30 |....@bytecount10|
  0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109| (no-bigendian !)
  0030: 31 32 37 31 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1271filecount109| (bigendian !)
  0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |3requirementsgen|
  0050: 65 72 61 6c 64 65 6c 74 61 25 32 43 72 65 76 6c |eraldelta%2Crevl|
  0060: 6f 67 2d 63 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a |og-compression-z|
  0070: 73 74 64 25 32 43 72 65 76 6c 6f 67 76 31 25 32 |std%2Crevlogv1%2|
  0080: 43 73 70 61 72 73 65 72 65 76 6c 6f 67 00 00 80 |Csparserevlog...|
  0090: 00 73 08 42 64 61 74 61 2f 30 2e 69 00 03 00 01 |.s.Bdata/0.i....|
  00a0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................|
  00b0: 00 00 00 01 ff ff ff ff ff ff ff ff 80 29 63 a0 |.............)c.|
  00c0: 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 |I.#....Vg.g,i..9|
  00d0: 00 00 00 00 00 00 00 00 00 00 00 00 75 30 73 26 |............u0s&|
  00e0: 45 64 61 74 61 2f 30 30 63 68 61 6e 67 65 6c 6f |Edata/00changelo|
  00f0: 67 2d 61 62 33 34 39 31 38 30 61 30 34 30 35 30 |g-ab349180a04050|
#endif
#if zstd rust no-dirstate-v2
  $ f --size --hex --bytes 256 body
  body: size=116310
  0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
  0010: 7c 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 ||.STREAM2.......|
  0020: 06 09 04 0c 40 62 79 74 65 63 6f 75 6e 74 31 30 |....@bytecount10|
  0030: 31 32 37 36 66 69 6c 65 63 6f 75 6e 74 31 30 39 |1276filecount109|
  0040: 33 72 65 71 75 69 72 65 6d 65 6e 74 73 67 65 6e |3requirementsgen|
  0050: 65 72 61 6c 64 65 6c 74 61 25 32 43 72 65 76 6c |eraldelta%2Crevl|
  0060: 6f 67 2d 63 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a |og-compression-z|
  0070: 73 74 64 25 32 43 72 65 76 6c 6f 67 76 31 25 32 |std%2Crevlogv1%2|
  0080: 43 73 70 61 72 73 65 72 65 76 6c 6f 67 00 00 80 |Csparserevlog...|
  0090: 00 73 08 42 64 61 74 61 2f 30 2e 69 00 03 00 01 |.s.Bdata/0.i....|
  00a0: 00 00 00 00 00 00 00 02 00 00 00 01 00 00 00 00 |................|
  00b0: 00 00 00 01 ff ff ff ff ff ff ff ff 80 29 63 a0 |.............)c.|
  00c0: 49 d3 23 87 bf ce fe 56 67 92 67 2c 69 d1 ec 39 |I.#....Vg.g,i..9|
  00d0: 00 00 00 00 00 00 00 00 00 00 00 00 75 30 73 26 |............u0s&|
  00e0: 45 64 61 74 61 2f 30 30 63 68 61 6e 67 65 6c 6f |Edata/00changelo|
  00f0: 67 2d 61 62 33 34 39 31 38 30 61 30 34 30 35 30 |g-ab349180a04050|
#endif
#if zstd dirstate-v2
  $ f --size --hex --bytes 256 body
  body: size=109549
  0000: 04 6e 6f 6e 65 48 47 32 30 00 00 00 00 00 00 00 |.noneHG20.......|
  0010: c0 07 53 54 52 45 41 4d 32 00 00 00 00 03 00 09 |..STREAM2.......|
  0020: 05 09 04 0c 85 62 79 74 65 63 6f 75 6e 74 39 35 |.....bytecount95|
  0030: 38 39 37 66 69 6c 65 63 6f 75 6e 74 31 30 33 30 |897filecount1030|
  0040: 72 65 71 75 69 72 65 6d 65 6e 74 73 64 6f 74 65 |requirementsdote|
  0050: 6e 63 6f 64 65 25 32 43 65 78 70 2d 64 69 72 73 |ncode%2Cexp-dirs|
  0060: 74 61 74 65 2d 76 32 25 32 43 66 6e 63 61 63 68 |tate-v2%2Cfncach|
  0070: 65 25 32 43 67 65 6e 65 72 61 6c 64 65 6c 74 61 |e%2Cgeneraldelta|
  0080: 25 32 43 70 65 72 73 69 73 74 65 6e 74 2d 6e 6f |%2Cpersistent-no|
  0090: 64 65 6d 61 70 25 32 43 72 65 76 6c 6f 67 2d 63 |demap%2Crevlog-c|
  00a0: 6f 6d 70 72 65 73 73 69 6f 6e 2d 7a 73 74 64 25 |ompression-zstd%|
  00b0: 32 43 72 65 76 6c 6f 67 76 31 25 32 43 73 70 61 |2Crevlogv1%2Cspa|
  00c0: 72 73 65 72 65 76 6c 6f 67 25 32 43 73 74 6f 72 |rserevlog%2Cstor|
  00d0: 65 00 00 80 00 73 08 42 64 61 74 61 2f 30 2e 69 |e....s.Bdata/0.i|
  00e0: 00 03 00 01 00 00 00 00 00 00 00 02 00 00 00 01 |................|
  00f0: 00 00 00 00 00 00 00 01 ff ff ff ff ff ff ff ff |................|
#endif
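
(Aside, not part of the test: the hexdumps above all begin with
"04 6e 6f 6e 65", i.e. a one-byte length followed by the compression
engine name b'none', then the bundle2 payload starting with the HG20
magic. A minimal sketch of splitting an application/mercurial-0.2 body
along those lines:)

  def split_mercurial_02_body(body):
      # one-byte length, then the compression engine name,
      # then the (here uncompressed) payload
      namelen = body[0]
      engine = body[1:1 + namelen]
      payload = body[1 + namelen:]
      return engine, payload

  engine, payload = split_mercurial_02_body(bytes.fromhex('046e6f6e6548473230'))
  assert engine == b'none' and payload == b'HG20'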

--uncompressed is an alias for --stream

#if stream-legacy
  $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
  streaming all changes
  1090 files to transfer, 102 KB of data (no-zstd !)
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  1090 files to transfer, 98.8 KB of data (zstd !)
  transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
  searching for changes
  no changes found
#endif
#if stream-bundle2-v2
  $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
  streaming all changes
  1093 files to transfer, 102 KB of data (no-zstd !)
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  1093 files to transfer, 98.9 KB of data (zstd !)
  transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)
#endif
#if stream-bundle2-v3
  $ hg clone --uncompressed -U http://localhost:$HGPORT clone1-uncompressed
  streaming all changes
  1093 entries to transfer
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)
#endif

Clone with background file closing enabled

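(Aside, not part of the test: worker.backgroundclose moves the close() of
freshly written files to a small pool of worker threads, which is where the
"starting 4 threads for background file closing" lines in the debug output
below come from. A rough illustration of the pattern, not Mercurial's
actual implementation:)

  import queue
  import threading

  class BackgroundCloser:
      """Close file objects on worker threads instead of inline."""

      def __init__(self, nthreads=4):
          self._queue = queue.Queue()
          self._threads = [
              threading.Thread(target=self._worker, daemon=True)
              for _ in range(nthreads)
          ]
          for t in self._threads:
              t.start()

      def _worker(self):
          while True:
              fh = self._queue.get()
              if fh is None:  # shutdown sentinel
                  return
              fh.close()

      def close(self, fh):
          # hand the file off; the caller does not wait for close()
          self._queue.put(fh)

      def shutdown(self):
          for _ in self._threads:
              self._queue.put(None)
          for t in self._threads:
              t.join()
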
#if stream-legacy
  $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
  using http://localhost:$HGPORT/
  sending capabilities command
  sending branchmap command
  streaming all changes
  sending stream_out command
  1090 files to transfer, 102 KB of data (no-zstd !)
  1090 files to transfer, 98.8 KB of data (zstd !)
  starting 4 threads for background file closing
  updating the branch cache
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
  query 1; heads
  sending batch command
  searching for changes
  all remote heads known locally
  no changes found
  sending getbundle command
  bundle2-input-bundle: with-transaction
  bundle2-input-part: "listkeys" (params: 1 mandatory) supported
  bundle2-input-part: "phase-heads" supported
  bundle2-input-part: total payload size 24
  bundle2-input-bundle: 2 parts total
  checking for updated bookmarks
  updating the branch cache
  (sent 5 HTTP requests and * bytes; received * bytes in responses) (glob)
#endif
#if stream-bundle2-v2
  $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
  using http://localhost:$HGPORT/
  sending capabilities command
  query 1; heads
  sending batch command
  streaming all changes
  sending getbundle command
  bundle2-input-bundle: with-transaction
  bundle2-input-part: "stream2" (params: 3 mandatory) supported
  applying stream bundle
  1093 files to transfer, 102 KB of data (no-zstd !)
  1093 files to transfer, 98.9 KB of data (zstd !)
  starting 4 threads for background file closing
  starting 4 threads for background file closing
  updating the branch cache
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  bundle2-input-part: total payload size 118984 (no-zstd !)
  transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)
  bundle2-input-part: total payload size 116145 (zstd no-bigendian !)
  bundle2-input-part: total payload size 116140 (zstd bigendian !)
  bundle2-input-part: "listkeys" (params: 1 mandatory) supported
  bundle2-input-bundle: 2 parts total
  checking for updated bookmarks
  updating the branch cache
  (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
#endif
#if stream-bundle2-v3
  $ hg --debug --config worker.backgroundclose=true --config worker.backgroundcloseminfilecount=1 clone --stream -U http://localhost:$HGPORT clone-background | grep -v adding
  using http://localhost:$HGPORT/
  sending capabilities command
  query 1; heads
  sending batch command
  streaming all changes
  sending getbundle command
  bundle2-input-bundle: with-transaction
  bundle2-input-part: "stream3-exp" (params: 1 mandatory) supported
  applying stream bundle
  1093 entries to transfer
  starting 4 threads for background file closing
  starting 4 threads for background file closing
  updating the branch cache
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  bundle2-input-part: total payload size 120079 (no-zstd !)
  transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)
  bundle2-input-part: total payload size 117240 (zstd no-bigendian !)
  bundle2-input-part: total payload size 116138 (zstd bigendian !)
  bundle2-input-part: "listkeys" (params: 1 mandatory) supported
  bundle2-input-bundle: 2 parts total
  checking for updated bookmarks
  updating the branch cache
  (sent 3 HTTP requests and * bytes; received * bytes in responses) (glob)
#endif

Cannot stream clone when there are secret changesets

  $ hg -R server phase --force --secret -r tip
  $ hg clone --stream -U http://localhost:$HGPORT secret-denied
  warning: stream clone requested but server has them disabled
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 1025 changes to 1025 files
  new changesets 96ee1d7354c4:c17445101a72

  $ killdaemons.py

Streaming of secrets can be overridden by server config

  $ cd server
  $ hg serve --config server.uncompressedallowsecret=true -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid > $DAEMON_PIDS
  $ cd ..

#if stream-legacy
  $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
  streaming all changes
  1090 files to transfer, 102 KB of data (no-zstd !)
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  1090 files to transfer, 98.8 KB of data (zstd !)
  transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
  searching for changes
  no changes found
#endif
#if stream-bundle2-v2
  $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
  streaming all changes
  1093 files to transfer, 102 KB of data (no-zstd !)
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  1093 files to transfer, 98.9 KB of data (zstd !)
  transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)
#endif
#if stream-bundle2-v3
  $ hg clone --stream -U http://localhost:$HGPORT secret-allowed
  streaming all changes
  1093 entries to transfer
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  transferred 98.9 KB in * seconds (* */sec) (glob) (zstd !)
#endif

  $ killdaemons.py

Verify interaction between preferuncompressed and secret presence

  $ cd server
  $ hg serve --config server.preferuncompressed=true -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid > $DAEMON_PIDS
  $ cd ..

  $ hg clone -U http://localhost:$HGPORT preferuncompressed-secret
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 1025 changes to 1025 files
  new changesets 96ee1d7354c4:c17445101a72

  $ killdaemons.py

Clone not allowed when full bundles disabled and can't serve secrets

  $ cd server
  $ hg serve --config server.disablefullbundle=true -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid > $DAEMON_PIDS
  $ cd ..

  $ hg clone --stream http://localhost:$HGPORT secret-full-disabled
  warning: stream clone requested but server has them disabled
  requesting all changes
  remote: abort: server has pull-based clones disabled
  abort: pull failed on remote
  (remove --pull if specified or upgrade Mercurial)
  [100]

Local stream clone with secrets involved
(This is just a test of behavior: if you have access to the repo's files,
there is no security boundary, so preventing a clone here isn't important.)

  $ hg clone -U --stream server local-secret
  warning: stream clone requested but server has them disabled
  requesting all changes
  adding changesets
  adding manifests
  adding file changes
  added 2 changesets with 1025 changes to 1025 files
  new changesets 96ee1d7354c4:c17445101a72

Stream clone while repo is changing:

  $ mkdir changing
  $ cd changing

prepare a repo with a small and a big file to cover both code paths in
emitrevlogdata (a sketch of the two paths follows)

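(Aside, not part of the test: the commands below create an empty f1 and a
50000-line f2 so that the server-side emission code exercises both its
small-file path and its chunked big-file path. A sketch of the idea only;
the helper name and the chunk size are assumptions, not Mercurial's actual
emitrevlogdata:)

  CHUNK_SIZE = 65536

  def emit_file(fh, size):
      if size <= CHUNK_SIZE:
          # small-file path: a single read covers the whole file
          yield fh.read(size)
      else:
          # big-file path: stream the content in fixed-size chunks
          remaining = size
          while remaining > 0:
              data = fh.read(min(CHUNK_SIZE, remaining))
              if not data:
                  break
              remaining -= len(data)
              yield data
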
  $ hg init repo
  $ touch repo/f1
  $ $TESTDIR/seq.py 50000 > repo/f2
  $ hg -R repo ci -Aqm "0"
  $ HG_TEST_STREAM_WALKED_FILE_1="$TESTTMP/sync_file_walked_1"
  $ export HG_TEST_STREAM_WALKED_FILE_1
  $ HG_TEST_STREAM_WALKED_FILE_2="$TESTTMP/sync_file_walked_2"
  $ export HG_TEST_STREAM_WALKED_FILE_2
  $ HG_TEST_STREAM_WALKED_FILE_3="$TESTTMP/sync_file_walked_3"
  $ export HG_TEST_STREAM_WALKED_FILE_3
# $ cat << EOF >> $HGRCPATH
# > [hooks]
# > pre-clone=rm -f "$TESTTMP/sync_file_walked_*"
# > EOF
  $ hg serve -R repo -p $HGPORT1 -d --error errors.log --pid-file=hg.pid --config extensions.stream_steps="$RUNTESTDIR/testlib/ext-stream-clone-steps.py"
  $ cat hg.pid >> $DAEMON_PIDS

clone while modifying the repo between stat'ing files (with the write lock
held) and actually serving file content

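(Aside, not part of the test: the three HG_TEST_STREAM_WALKED_FILE_*
variables exported above are marker files used as a handshake between this
test and the server-side extension: the server signals that it has walked
and stat'ed the files, the test commits in between, then lets the server
resume. A minimal sketch of such a wait-on-file helper, assuming simple
polling:)

  import os
  import time

  def wait_on_file(path, timeout=10.0, interval=0.01):
      # block until the marker file appears, or give up
      deadline = time.monotonic() + timeout
      while time.monotonic() < deadline:
          if os.path.exists(path):
              return
          time.sleep(interval)
      raise TimeoutError('marker file never appeared: %s' % path)

  def signal_via_file(path):
      # create the marker file to unblock the other side
      with open(path, 'a'):
          pass
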
  $ (hg clone -q --stream -U http://localhost:$HGPORT1 clone; touch "$HG_TEST_STREAM_WALKED_FILE_3") &
  $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_1
  $ echo >> repo/f1
  $ echo >> repo/f2
  $ hg -R repo ci -m "1" --config ui.timeout.warn=-1
  $ touch $HG_TEST_STREAM_WALKED_FILE_2
  $ $RUNTESTDIR/testlib/wait-on-file 10 $HG_TEST_STREAM_WALKED_FILE_3
  $ hg -R clone id
  000000000000
  $ cat errors.log
  $ cd ..

Stream repository with bookmarks
--------------------------------

(revert introduction of secret changeset)

  $ hg -R server phase --draft 'secret()'

add a bookmark

  $ hg -R server bookmark -r tip some-bookmark

clone it

#if stream-legacy
  $ hg clone --stream http://localhost:$HGPORT with-bookmarks
  streaming all changes
  1090 files to transfer, 102 KB of data (no-zstd !)
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  1090 files to transfer, 98.8 KB of data (zstd !)
  transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
  searching for changes
  no changes found
  updating to branch default
  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif
#if stream-bundle2-v2
  $ hg clone --stream http://localhost:$HGPORT with-bookmarks
  streaming all changes
  1096 files to transfer, 102 KB of data (no-zstd !)
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  1096 files to transfer, 99.1 KB of data (zstd !)
  transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !)
  updating to branch default
  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif
#if stream-bundle2-v3
  $ hg clone --stream http://localhost:$HGPORT with-bookmarks
  streaming all changes
  1096 entries to transfer
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !)
  updating to branch default
  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif
  $ hg verify -R with-bookmarks -q
  $ hg -R with-bookmarks bookmarks
     some-bookmark             2:5223b5e3265f

Stream repository with phases
-----------------------------

Clone as publishing

  $ hg -R server phase -r 'all()'
  0: draft
  1: draft
  2: draft

#if stream-legacy
  $ hg clone --stream http://localhost:$HGPORT phase-publish
  streaming all changes
  1090 files to transfer, 102 KB of data (no-zstd !)
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  1090 files to transfer, 98.8 KB of data (zstd !)
  transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
  searching for changes
  no changes found
  updating to branch default
  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif
#if stream-bundle2-v2
  $ hg clone --stream http://localhost:$HGPORT phase-publish
  streaming all changes
  1096 files to transfer, 102 KB of data (no-zstd !)
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  1096 files to transfer, 99.1 KB of data (zstd !)
  transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !)
  updating to branch default
  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif
#if stream-bundle2-v3
  $ hg clone --stream http://localhost:$HGPORT phase-publish
  streaming all changes
  1096 entries to transfer
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !)
  updating to branch default
  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
#endif
  $ hg verify -R phase-publish -q
  $ hg -R phase-publish phase -r 'all()'
  0: public
  1: public
  2: public

Clone as non publishing

  $ cat << EOF >> server/.hg/hgrc
  > [phases]
  > publish = False
  > EOF
  $ killdaemons.py
  $ hg -R server serve -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid > $DAEMON_PIDS

#if stream-legacy

With v1 of the stream protocol, changesets are always cloned as public,
which makes stream v1 unsuitable for non-publishing repositories.

  $ hg clone --stream http://localhost:$HGPORT phase-no-publish
  streaming all changes
  1090 files to transfer, 102 KB of data (no-zstd !)
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  1090 files to transfer, 98.8 KB of data (zstd !)
  transferred 98.8 KB in * seconds (* */sec) (glob) (zstd !)
  searching for changes
  no changes found
  updating to branch default
  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg -R phase-no-publish phase -r 'all()'
  0: public
  1: public
  2: public
#endif
#if stream-bundle2-v2
  $ hg clone --stream http://localhost:$HGPORT phase-no-publish
  streaming all changes
  1097 files to transfer, 102 KB of data (no-zstd !)
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  1097 files to transfer, 99.1 KB of data (zstd !)
  transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !)
  updating to branch default
  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg -R phase-no-publish phase -r 'all()'
  0: draft
  1: draft
  2: draft
#endif
#if stream-bundle2-v3
  $ hg clone --stream http://localhost:$HGPORT phase-no-publish
  streaming all changes
  1097 entries to transfer
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  transferred 99.1 KB in * seconds (* */sec) (glob) (zstd !)
  updating to branch default
  1088 files updated, 0 files merged, 0 files removed, 0 files unresolved
  $ hg -R phase-no-publish phase -r 'all()'
  0: draft
  1: draft
  2: draft
#endif
  $ hg verify -R phase-no-publish -q

  $ killdaemons.py

#if stream-legacy

With v1 of the stream protocol, changesets are always cloned as public, and
there is no obsolescence marker exchange in stream v1.

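A clone made with stream v1 therefore arrives without any markers. A
hypothetical way to check (the clone name is illustrative; this v1 branch of
the test is intentionally empty, so nothing actually runs here):

  $ hg debugobsolete -R some-v1-clone

Empty output from 'hg debugobsolete' means no markers are stored.
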
#endif
#if stream-bundle2-v2

Stream repository with obsolescence
-----------------------------------

Clone non-publishing with obsolescence

  $ cat >> $HGRCPATH << EOF
  > [experimental]
  > evolution=all
  > EOF

  $ cd server
  $ echo foo > foo
  $ hg -q commit -m 'about to be pruned'
  $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents
  1 new obsolescence markers
  obsoleted 1 changesets
  $ hg up null -q
  $ hg log -T '{rev}: {phase}\n'
  2: draft
  1: draft
  0: draft
  $ hg serve -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid > $DAEMON_PIDS
  $ cd ..

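For context: 'hg debugobsolete NODE' with no successor records a prune marker,
and --record-parents additionally stores the pruned changeset's parents in the
marker (the value in curly braces in the listing below). A generic sketch of
the two forms, with OLDNODE and NEWNODE as hypothetical placeholders:

  $ hg debugobsolete OLDNODE NEWNODE             # OLDNODE rewritten as NEWNODE
  $ hg debugobsolete OLDNODE --record-parents    # OLDNODE pruned, no successor
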
  $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence
  streaming all changes
  1098 files to transfer, 102 KB of data (no-zstd !)
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  1098 files to transfer, 99.5 KB of data (zstd !)
  transferred 99.5 KB in * seconds (* */sec) (glob) (zstd !)
  $ hg -R with-obsolescence log -T '{rev}: {phase}\n'
  2: draft
  1: draft
  0: draft
  $ hg debugobsolete -R with-obsolescence
  8c206a663911c1f97f2f9d7382e417ae55872cfa 0 {5223b5e3265f0df40bb743da62249413d74ac70f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  $ hg verify -R with-obsolescence -q

  $ hg clone -U --stream --config experimental.evolution=0 http://localhost:$HGPORT with-obsolescence-no-evolution
  streaming all changes
  remote: abort: server has obsolescence markers, but client cannot receive them via stream clone
  abort: pull failed on remote
  [100]
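(Exit code 100 is Mercurial's detailed exit code for remote errors: rather
than silently dropping markers that a non-evolution client cannot receive, the
server refuses the stream clone outright.)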

  $ killdaemons.py

#endif
#if stream-bundle2-v3

Stream repository with obsolescence
-----------------------------------

Clone non-publishing with obsolescence

  $ cat >> $HGRCPATH << EOF
  > [experimental]
  > evolution=all
  > EOF

  $ cd server
  $ echo foo > foo
  $ hg -q commit -m 'about to be pruned'
  $ hg debugobsolete `hg log -r . -T '{node}'` -d '0 0' -u test --record-parents
  1 new obsolescence markers
  obsoleted 1 changesets
  $ hg up null -q
  $ hg log -T '{rev}: {phase}\n'
  2: draft
  1: draft
  0: draft
  $ hg serve -p $HGPORT -d --pid-file=hg.pid
  $ cat hg.pid > $DAEMON_PIDS
  $ cd ..

  $ hg clone -U --stream http://localhost:$HGPORT with-obsolescence
  streaming all changes
  1098 entries to transfer
  transferred 102 KB in * seconds (* */sec) (glob) (no-zstd !)
  transferred 99.5 KB in * seconds (* */sec) (glob) (zstd !)
  $ hg -R with-obsolescence log -T '{rev}: {phase}\n'
  2: draft
  1: draft
  0: draft
  $ hg debugobsolete -R with-obsolescence
  8c206a663911c1f97f2f9d7382e417ae55872cfa 0 {5223b5e3265f0df40bb743da62249413d74ac70f} (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
  $ hg verify -R with-obsolescence -q

  $ hg clone -U --stream --config experimental.evolution=0 http://localhost:$HGPORT with-obsolescence-no-evolution
  streaming all changes
  remote: abort: server has obsolescence markers, but client cannot receive them via stream clone
  abort: pull failed on remote
  [100]

  $ killdaemons.py

#endif

Cloning a repo with no requirements (a bare .hg directory, with no 'requires'
file at all) doesn't give some obscure error

  $ mkdir -p empty-repo/.hg
  $ hg clone -q --stream ssh://user@dummy/empty-repo empty-repo2
  $ hg --cwd empty-repo2 verify -q

Cloning a repo with an empty manifestlog doesn't give some weird error

  $ rm -r empty-repo; hg init empty-repo
  $ (cd empty-repo; touch x; hg commit -Am empty; hg debugstrip -r 0) > /dev/null
  $ hg clone -q --stream ssh://user@dummy/empty-repo empty-repo3
  $ hg --cwd empty-repo3 verify -q 2>&1 | grep -v warning
  [1]
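(Committing a file and then stripping revision 0 leaves empty revlogs behind,
which is exactly the situation whose stream-clone crash this change fixes.)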

The warnings filtered out here refer to zero-length 'orphan' data files.
Those are harmless, so that's fine. (The [1] exit status comes from
'grep -v': verify produced nothing but those warnings.)