changelog-v2: fix the docket `struct`...
marmoute - r50609:f04d4599 default
@@ -1,428 +1,428 b''
# docket - code related to revlog "docket"
#
# Copyright 2021 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

### Revlog docket file
#
# The revlog is stored on disk using multiple files:
#
# * a small docket file, containing metadata and a pointer,
#
# * an index file, containing fixed width information about revisions,
#
# * a data file, containing variable width data for these revisions,


import os
import random
import struct

from .. import (
    encoding,
    error,
    node,
    util,
)

from . import (
    constants,
)


def make_uid(id_size=8):
    """return a new unique identifier.

    The identifier is random and composed of ascii characters."""
    # since we "hex" the result we need half the number of bits to have a
    # final uuid of size ID_SIZE
    return node.hex(os.urandom(id_size // 2))
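
A quick sanity check of the size arithmetic above (a standalone sketch, using the stdlib `binascii.hexlify` in place of Mercurial's `node.hex`): `os.urandom(id_size // 2)` yields half as many raw bytes as requested characters, and hex-encoding doubles the length back.

```python
import binascii
import os

def make_uid(id_size=8):
    # id_size // 2 raw random bytes, doubled in length by hex-encoding,
    # give exactly id_size ascii characters
    return binascii.hexlify(os.urandom(id_size // 2))

uid = make_uid()
assert len(uid) == 8  # e.g. b'9f2c41ab'
```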


# some special test logic to avoid annoying random output in the tests
stable_docket_file = encoding.environ.get(b'HGTEST_UUIDFILE')

if stable_docket_file:

    def make_uid(id_size=8):
        try:
            with open(stable_docket_file, mode='rb') as f:
                seed = f.read().strip()
        except FileNotFoundError:
            seed = b'04'  # chosen by a fair dice roll. guaranteed to be random
        iter_seed = iter(seed)
        # some basic circular sum hashing on 64 bits
        int_seed = 0
        low_mask = int('1' * 35, 2)
        for i in iter_seed:
            high_part = int_seed >> 35
            low_part = (int_seed & low_mask) << 28
            int_seed = high_part + low_part + i
        r = random.Random()
        r.seed(int_seed, version=1)
        # once we drop python 3.8 support we can simply use r.randbytes
        raw = r.getrandbits(id_size * 4)
        assert id_size == 8
        p = struct.pack('>L', raw)
        new = node.hex(p)
        with open(stable_docket_file, 'wb') as f:
            f.write(new)
        return new
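
The loop above folds every byte of the previous uid file into a 63-bit accumulator, so each test run derives its next uid deterministically from the last one. A minimal standalone sketch of just that folding step:

```python
def fold_seed(seed: bytes) -> int:
    # rotate-and-add over a 63-bit accumulator: the high bits wrap
    # around to the bottom while each seed byte is mixed in
    int_seed = 0
    low_mask = (1 << 35) - 1  # same mask as int('1' * 35, 2)
    for byte in seed:
        high_part = int_seed >> 35
        low_part = (int_seed & low_mask) << 28
        int_seed = high_part + low_part + byte
    return int_seed

assert fold_seed(b'04') == fold_seed(b'04')  # deterministic
assert fold_seed(b'04') != fold_seed(b'05')  # sensitive to the seed
```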


# Docket format
#
# * 4 bytes: revlog version
# | This is mandatory as docket must be compatible with the previous
# | revlog index header.
# * 1 byte:  size of index uuid
# * 1 byte:  number of outdated index uuids
# * 1 byte:  size of data uuid
# * 1 byte:  number of outdated data uuids
# * 1 byte:  size of sidedata uuid
# * 1 byte:  number of outdated sidedata uuids
# * 8 bytes: size of index-data
# * 8 bytes: pending size of index-data
# * 8 bytes: size of data
# * 8 bytes: pending size of data
# * 8 bytes: size of sidedata
# * 8 bytes: pending size of sidedata
# * 1 byte:  default compression header
-S_HEADER = struct.Struct(constants.INDEX_HEADER_FMT + b'BBBBBBLLLLLLc')
+S_HEADER = struct.Struct(constants.INDEX_HEADER_FMT + b'BBBBBBQQQQQQc')
# * 1 byte:  size of uuid
# * 4 bytes: size of file
S_OLD_UID = struct.Struct('>BL')
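
The one-line change in this commit swaps the six size fields of `S_HEADER` from struct code `L` (4-byte unsigned) to `Q` (8-byte unsigned). The format comment promises 8 bytes per size field, so the old format both disagreed with the documented layout and capped every recorded size at 2**32 - 1. A standalone demonstration of the difference:

```python
import struct

# with the '>' (big-endian, standard size) prefix used by the docket,
# 'L' packs 4 bytes and 'Q' packs 8
assert struct.calcsize('>L') == 4
assert struct.calcsize('>Q') == 8

five_gib = 5 * 1024 ** 3  # a revlog data file larger than 4 GiB

struct.pack('>Q', five_gib)  # fine: fits in 64 bits
try:
    struct.pack('>L', five_gib)  # cannot fit in 32 bits
except struct.error as exc:
    print(exc)  # e.g. "'L' format requires 0 <= number <= 4294967295"
```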


class RevlogDocket:
    """metadata associated with revlog"""

    def __init__(
        self,
        revlog,
        use_pending=False,
        version_header=None,
        index_uuid=None,
        older_index_uuids=(),
        data_uuid=None,
        older_data_uuids=(),
        sidedata_uuid=None,
        older_sidedata_uuids=(),
        index_end=0,
        pending_index_end=0,
        data_end=0,
        pending_data_end=0,
        sidedata_end=0,
        pending_sidedata_end=0,
        default_compression_header=None,
    ):
        self._version_header = version_header
        self._read_only = bool(use_pending)
        self._dirty = False
        self._radix = revlog.radix
        self._path = revlog._docket_file
        self._opener = revlog.opener
        self._index_uuid = index_uuid
        self._older_index_uuids = older_index_uuids
        self._data_uuid = data_uuid
        self._older_data_uuids = older_data_uuids
        self._sidedata_uuid = sidedata_uuid
        self._older_sidedata_uuids = older_sidedata_uuids
        assert not set(older_index_uuids) & set(older_data_uuids)
        assert not set(older_data_uuids) & set(older_sidedata_uuids)
        assert not set(older_index_uuids) & set(older_sidedata_uuids)
        # these asserts should be True as long as we have a single index filename
        assert index_end <= pending_index_end
        assert data_end <= pending_data_end
        assert sidedata_end <= pending_sidedata_end
        self._initial_index_end = index_end
        self._pending_index_end = pending_index_end
        self._initial_data_end = data_end
        self._pending_data_end = pending_data_end
        self._initial_sidedata_end = sidedata_end
        self._pending_sidedata_end = pending_sidedata_end
        if use_pending:
            self._index_end = self._pending_index_end
            self._data_end = self._pending_data_end
            self._sidedata_end = self._pending_sidedata_end
        else:
            self._index_end = self._initial_index_end
            self._data_end = self._initial_data_end
            self._sidedata_end = self._initial_sidedata_end
        self.default_compression_header = default_compression_header

    def index_filepath(self):
        """file path to the current index file associated to this docket"""
        # very simplistic version at first
        if self._index_uuid is None:
            self._index_uuid = make_uid()
        return b"%s-%s.idx" % (self._radix, self._index_uuid)

    def new_index_file(self):
        """switch index file to a new UID

        The previous index UID is moved to the "older" list."""
        old = (self._index_uuid, self._index_end)
        self._older_index_uuids.insert(0, old)
        self._index_uuid = make_uid()
        return self.index_filepath()

    def old_index_filepaths(self, include_empty=True):
        """yield file path to older index files associated to this docket"""
        # very simplistic version at first
        for uuid, size in self._older_index_uuids:
            if include_empty or size > 0:
                yield b"%s-%s.idx" % (self._radix, uuid)
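
The same rotate-and-archive pattern repeats below for the data and sidedata files: each rotation records the old uid together with the offset reached in that file, so superseded files can later be located and removed. A stripped-down, hypothetical model of the bookkeeping (`FileRotation` is not part of Mercurial):

```python
class FileRotation:
    """Hypothetical model of the uid rotation used by the docket."""

    def __init__(self, radix: bytes, make_uid):
        self.radix = radix
        self.make_uid = make_uid
        self.uid = make_uid()
        self.end = 0  # bytes written to the current file
        self.older = []  # (uid, end_offset) pairs, newest first

    def filepath(self) -> bytes:
        return b"%s-%s.idx" % (self.radix, self.uid)

    def rotate(self) -> bytes:
        # archive the current uid with its final size, then start fresh
        self.older.insert(0, (self.uid, self.end))
        self.uid = self.make_uid()
        self.end = 0
        return self.filepath()
```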

    def data_filepath(self):
        """file path to the current data file associated to this docket"""
        # very simplistic version at first
        if self._data_uuid is None:
            self._data_uuid = make_uid()
        return b"%s-%s.dat" % (self._radix, self._data_uuid)

    def new_data_file(self):
        """switch data file to a new UID

        The previous data UID is moved to the "older" list."""
        old = (self._data_uuid, self._data_end)
        self._older_data_uuids.insert(0, old)
        self._data_uuid = make_uid()
        return self.data_filepath()

    def old_data_filepaths(self, include_empty=True):
        """yield file path to older data files associated to this docket"""
        # very simplistic version at first
        for uuid, size in self._older_data_uuids:
            if include_empty or size > 0:
                yield b"%s-%s.dat" % (self._radix, uuid)

    def sidedata_filepath(self):
        """file path to the current sidedata file associated to this docket"""
        # very simplistic version at first
        if self._sidedata_uuid is None:
            self._sidedata_uuid = make_uid()
        return b"%s-%s.sda" % (self._radix, self._sidedata_uuid)

    def new_sidedata_file(self):
        """switch sidedata file to a new UID

        The previous sidedata UID is moved to the "older" list."""
        old = (self._sidedata_uuid, self._sidedata_end)
        self._older_sidedata_uuids.insert(0, old)
        self._sidedata_uuid = make_uid()
        return self.sidedata_filepath()

    def old_sidedata_filepaths(self, include_empty=True):
        """yield file path to older sidedata files associated to this docket"""
        # very simplistic version at first
        for uuid, size in self._older_sidedata_uuids:
            if include_empty or size > 0:
                yield b"%s-%s.sda" % (self._radix, uuid)

    @property
    def index_end(self):
        return self._index_end

    @index_end.setter
    def index_end(self, new_size):
        if new_size != self._index_end:
            self._index_end = new_size
            self._dirty = True

    @property
    def data_end(self):
        return self._data_end

    @data_end.setter
    def data_end(self, new_size):
        if new_size != self._data_end:
            self._data_end = new_size
            self._dirty = True

    @property
    def sidedata_end(self):
        return self._sidedata_end

    @sidedata_end.setter
    def sidedata_end(self, new_size):
        if new_size != self._sidedata_end:
            self._sidedata_end = new_size
            self._dirty = True

    def write(self, transaction, pending=False, stripping=False):
        """write the modifications to disk, if any

        This makes the new content visible to all processes"""
        if not self._dirty:
            return False
        else:
            if self._read_only:
                msg = b'writing read-only docket: %s'
                msg %= self._path
                raise error.ProgrammingError(msg)
            if not stripping:
                # XXX we could leverage the docket while stripping. However
                # it is not powerful enough at the time of this comment
                transaction.addbackup(self._path, location=b'store')
            with self._opener(self._path, mode=b'w', atomictemp=True) as f:
                f.write(self._serialize(pending=pending))
            # if pending, we still need to write the final data eventually
            self._dirty = pending
            return True
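
The `pending` flag implements a two-phase publication: a pending write stores the pre-transaction sizes as the "official" ones (so ordinary readers keep ignoring in-flight data) while the docket stays dirty, forcing a final write at transaction close. A condensed, hypothetical model of that state machine:

```python
class TwoPhaseSize:
    """Hypothetical sketch of the docket's pending/official size pair."""

    def __init__(self):
        self.initial = 0  # size when the docket was loaded
        self.current = 0  # size reached so far in this transaction
        self.dirty = False

    def grow(self, new_size):
        self.current = new_size
        self.dirty = True

    def write(self, pending=False):
        # ordinary readers see `official`; pending-aware readers may use
        # the second value of the pair
        official = self.initial if pending else self.current
        record = (official, self.current)
        # ... persist `record` atomically here ...
        self.dirty = pending  # a pending write still awaits the final one
        return record
```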

    def _serialize(self, pending=False):
        if pending:
            official_index_end = self._initial_index_end
            official_data_end = self._initial_data_end
            official_sidedata_end = self._initial_sidedata_end
        else:
            official_index_end = self._index_end
            official_data_end = self._data_end
            official_sidedata_end = self._sidedata_end

        # these asserts should be True as long as we have a single index filename
        assert official_data_end <= self._data_end
        assert official_sidedata_end <= self._sidedata_end
        data = (
            self._version_header,
            len(self._index_uuid),
            len(self._older_index_uuids),
            len(self._data_uuid),
            len(self._older_data_uuids),
            len(self._sidedata_uuid),
            len(self._older_sidedata_uuids),
            official_index_end,
            self._index_end,
            official_data_end,
            self._data_end,
            official_sidedata_end,
            self._sidedata_end,
            self.default_compression_header,
        )
        s = []
        s.append(S_HEADER.pack(*data))

        s.append(self._index_uuid)
        for u, size in self._older_index_uuids:
            s.append(S_OLD_UID.pack(len(u), size))
        for u, size in self._older_index_uuids:
            s.append(u)

        s.append(self._data_uuid)
        for u, size in self._older_data_uuids:
            s.append(S_OLD_UID.pack(len(u), size))
        for u, size in self._older_data_uuids:
            s.append(u)

        s.append(self._sidedata_uuid)
        for u, size in self._older_sidedata_uuids:
            s.append(S_OLD_UID.pack(len(u), size))
        for u, size in self._older_sidedata_uuids:
            s.append(u)
        return b''.join(s)


def default_docket(revlog, version_header):
    """given a revlog version header, return a new docket object for the
    given revlog"""
    rl_version = version_header & 0xFFFF
    if rl_version not in (constants.REVLOGV2, constants.CHANGELOGV2):
        return None
    comp = util.compengines[revlog._compengine].revlogheader()
    docket = RevlogDocket(
        revlog,
        version_header=version_header,
        default_compression_header=comp,
    )
    docket._dirty = True
    return docket
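
The `& 0xFFFF` mask works because the revlog version header keeps feature flags in the high 16 bits and the version number in the low 16 bits. A standalone illustration (the flag and version values below are made up):

```python
EXAMPLE_FLAG = 1 << 16  # hypothetical feature flag bit
EXAMPLE_VERSION = 0xBEEF  # hypothetical version number

version_header = EXAMPLE_FLAG | EXAMPLE_VERSION
rl_version = version_header & 0xFFFF  # strip the flag bits

assert rl_version == EXAMPLE_VERSION
```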


def _parse_old_uids(get_data, count):
    all_sizes = []
    all_uids = []
    for i in range(0, count):
        raw = get_data(S_OLD_UID.size)
        all_sizes.append(S_OLD_UID.unpack(raw))

    for uid_size, file_size in all_sizes:
        uid = get_data(uid_size)
        all_uids.append((uid, file_size))
    return all_uids
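
Note the two-pass shape: `_serialize` above writes all fixed-size `S_OLD_UID` records first and all uid byte strings second, and `_parse_old_uids` mirrors that order. A self-contained round trip over the same layout (standalone sketch reusing only the `'>BL'` record format):

```python
import struct

S_OLD_UID = struct.Struct('>BL')

def serialize_old_uids(entries):
    # entries: list of (uid_bytes, file_size); records first, uids second
    out = [S_OLD_UID.pack(len(u), size) for u, size in entries]
    out += [u for u, _size in entries]
    return b''.join(out)

def parse_old_uids(data, count):
    offset = 0
    sizes = []
    for _ in range(count):
        sizes.append(S_OLD_UID.unpack_from(data, offset))
        offset += S_OLD_UID.size
    uids = []
    for uid_size, file_size in sizes:
        uids.append((data[offset : offset + uid_size], file_size))
        offset += uid_size
    return uids

entries = [(b'aabbccdd', 1024), (b'11223344', 0)]
assert parse_old_uids(serialize_old_uids(entries), len(entries)) == entries
```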


def parse_docket(revlog, data, use_pending=False):
    """given some docket data return a docket object for the given revlog"""
    header = S_HEADER.unpack(data[: S_HEADER.size])

    # this is a mutable closure capture used in `get_data`
    offset = [S_HEADER.size]

    def get_data(size):
        """utility closure to access the `size` next bytes"""
        if offset[0] + size > len(data):
            # XXX better class
            msg = b"docket is too short, expected %d got %d"
            msg %= (offset[0] + size, len(data))
            raise error.Abort(msg)
        raw = data[offset[0] : offset[0] + size]
        offset[0] += size
        return raw
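
Storing the offset in a one-element list lets the nested `get_data` mutate it without a `nonlocal` declaration. The same cursor can be written with `nonlocal` instead; a standalone sketch:

```python
def make_reader(data: bytes):
    offset = 0  # cursor shared with the closure below

    def read(size: int) -> bytes:
        nonlocal offset  # replaces the one-element-list trick
        if offset + size > len(data):
            msg = "buffer too short, expected %d got %d"
            raise ValueError(msg % (offset + size, len(data)))
        raw = data[offset : offset + size]
        offset += size
        return raw

    return read

read = make_reader(b'abcdef')
assert read(2) == b'ab'
assert read(3) == b'cde'
```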

    iheader = iter(header)

    version_header = next(iheader)

    index_uuid_size = next(iheader)
    index_uuid = get_data(index_uuid_size)

    older_index_uuid_count = next(iheader)
    older_index_uuids = _parse_old_uids(get_data, older_index_uuid_count)

    data_uuid_size = next(iheader)
    data_uuid = get_data(data_uuid_size)

    older_data_uuid_count = next(iheader)
    older_data_uuids = _parse_old_uids(get_data, older_data_uuid_count)

    sidedata_uuid_size = next(iheader)
    sidedata_uuid = get_data(sidedata_uuid_size)

    older_sidedata_uuid_count = next(iheader)
    older_sidedata_uuids = _parse_old_uids(get_data, older_sidedata_uuid_count)

    index_size = next(iheader)

    pending_index_size = next(iheader)

    data_size = next(iheader)

    pending_data_size = next(iheader)

    sidedata_size = next(iheader)

    pending_sidedata_size = next(iheader)

    default_compression_header = next(iheader)

    docket = RevlogDocket(
        revlog,
        use_pending=use_pending,
        version_header=version_header,
        index_uuid=index_uuid,
        older_index_uuids=older_index_uuids,
        data_uuid=data_uuid,
        older_data_uuids=older_data_uuids,
        sidedata_uuid=sidedata_uuid,
        older_sidedata_uuids=older_sidedata_uuids,
        index_end=index_size,
        pending_index_end=pending_index_size,
        data_end=data_size,
        pending_data_end=pending_data_size,
        sidedata_end=sidedata_size,
        pending_sidedata_end=pending_sidedata_size,
        default_compression_header=default_compression_header,
    )
    return docket