revlogutils: remove Python 2 variant for iter_seed...
Gregory Szorc
r49763:ed2af456 default
@@ -1,440 +1,435 @@
# docket - code related to revlog "docket"
#
# Copyright 2021 Pierre-Yves David <pierre-yves.david@octobus.net>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

### Revlog docket file
#
# The revlog is stored on disk using multiple files:
#
# * a small docket file, containing metadata and a pointer,
#
# * an index file, containing fixed width information about revisions,
#
# * a data file, containing variable width data for these revisions,
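
# For instance (hypothetical uuids; the docket file's own name is decided by
# the caller, not in this module), a revlog with radix "00changelog" stored
# this way could consist of:
#
#   00changelog-1a2b3c4d.idx   (index)
#   00changelog-5e6f7a8b.dat   (data)
#   00changelog-9c0d1e2f.sda   (sidedata)
#
# matching the ``%s-%s.idx`` style names built by the *_filepath() helpers
# below.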

import errno
import os
import random
import struct

from .. import (
    encoding,
    error,
    node,
    pycompat,
    util,
)

from . import (
    constants,
)


def make_uid(id_size=8):
    """return a new unique identifier.

    The identifier is random and composed of ascii characters."""
    # since we "hex" the result we need half the number of bits to have a
    # final uuid of size ID_SIZE
    return node.hex(os.urandom(id_size // 2))
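
# Illustration (hedged; not part of the original module): hexing
# ``id_size // 2`` random bytes yields exactly ``id_size`` ascii characters,
# so the default settings produce an 8-character uid:
#
#     >>> len(node.hex(os.urandom(4)))
#     8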

# some special test logic to avoid annoying random output in the tests
stable_docket_file = encoding.environ.get(b'HGTEST_UUIDFILE')

if stable_docket_file:

    def make_uid(id_size=8):
        try:
            with open(stable_docket_file, mode='rb') as f:
                seed = f.read().strip()
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise
            seed = b'04'  # chosen by a fair dice roll. guaranteed to be random
-        if pycompat.ispy3:
-            iter_seed = iter(seed)
-        else:
-            # pytype: disable=wrong-arg-types
-            iter_seed = (ord(c) for c in seed)
-            # pytype: enable=wrong-arg-types
+        iter_seed = iter(seed)
        # some basic circular sum hashing on 64 bits
        int_seed = 0
        low_mask = int('1' * 35, 2)
        for i in iter_seed:
            high_part = int_seed >> 35
            low_part = (int_seed & low_mask) << 28
            int_seed = high_part + low_part + i
        r = random.Random()
        if pycompat.ispy3:
            r.seed(int_seed, version=1)
        else:
            r.seed(int_seed)
        # once we drop python 3.8 support we can simply use r.randbytes
        raw = r.getrandbits(id_size * 4)
        assert id_size == 8
        p = struct.pack('>L', raw)
        new = node.hex(p)
        with open(stable_docket_file, 'wb') as f:
            f.write(new)
        return new
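

# An equivalent standalone restatement of the hashing loop above; this
# helper is illustrative only and is not used by the module:
def _circular_sum_hash_sketch(seed):
    """Hash ``seed`` (bytes) with the 35/28-bit circular sum used above."""
    int_seed = 0
    low_mask = (1 << 35) - 1  # same value as int('1' * 35, 2)
    for byte in seed:  # py3: iterating bytes yields ints
        int_seed = (int_seed >> 35) + ((int_seed & low_mask) << 28) + byte
    return int_seed
# Identical seeds always hash identically, which is what keeps the
# HGTEST_UUIDFILE uid sequence stable across test runs.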


# Docket format
#
# * 4 bytes: revlog version
# | This is mandatory as docket must be compatible with the previous
# | revlog index header.
# * 1 byte:  size of index uuid
# * 1 byte:  number of outdated index uuids
# * 1 byte:  size of data uuid
# * 1 byte:  number of outdated data uuids
# * 1 byte:  size of sidedata uuid
# * 1 byte:  number of outdated sidedata uuids
# * 8 bytes: size of index-data
# * 8 bytes: pending size of index-data
# * 8 bytes: size of data
# * 8 bytes: pending size of data
# * 8 bytes: size of sidedata
# * 8 bytes: pending size of sidedata
# * 1 byte:  default compression header
S_HEADER = struct.Struct(constants.INDEX_HEADER_FMT + b'BBBBBBLLLLLLc')
# * 1 byte:  size of index uuid
# * 8 bytes: size of file
S_OLD_UID = struct.Struct('>BL')
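
# For example (illustrative values only), one "old uid" length/size record
# round-trips through S_OLD_UID as:
#
#     >>> S_OLD_UID.unpack(S_OLD_UID.pack(8, 1024))
#     (8, 1024)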
109
104
110
105
111 class RevlogDocket(object):
106 class RevlogDocket(object):
112 """metadata associated with revlog"""
107 """metadata associated with revlog"""
113
108
114 def __init__(
109 def __init__(
115 self,
110 self,
116 revlog,
111 revlog,
117 use_pending=False,
112 use_pending=False,
118 version_header=None,
113 version_header=None,
119 index_uuid=None,
114 index_uuid=None,
120 older_index_uuids=(),
115 older_index_uuids=(),
121 data_uuid=None,
116 data_uuid=None,
122 older_data_uuids=(),
117 older_data_uuids=(),
123 sidedata_uuid=None,
118 sidedata_uuid=None,
124 older_sidedata_uuids=(),
119 older_sidedata_uuids=(),
125 index_end=0,
120 index_end=0,
126 pending_index_end=0,
121 pending_index_end=0,
127 data_end=0,
122 data_end=0,
128 pending_data_end=0,
123 pending_data_end=0,
129 sidedata_end=0,
124 sidedata_end=0,
130 pending_sidedata_end=0,
125 pending_sidedata_end=0,
131 default_compression_header=None,
126 default_compression_header=None,
132 ):
127 ):
133 self._version_header = version_header
128 self._version_header = version_header
134 self._read_only = bool(use_pending)
129 self._read_only = bool(use_pending)
135 self._dirty = False
130 self._dirty = False
136 self._radix = revlog.radix
131 self._radix = revlog.radix
137 self._path = revlog._docket_file
132 self._path = revlog._docket_file
138 self._opener = revlog.opener
133 self._opener = revlog.opener
139 self._index_uuid = index_uuid
134 self._index_uuid = index_uuid
140 self._older_index_uuids = older_index_uuids
135 self._older_index_uuids = older_index_uuids
141 self._data_uuid = data_uuid
136 self._data_uuid = data_uuid
142 self._older_data_uuids = older_data_uuids
137 self._older_data_uuids = older_data_uuids
143 self._sidedata_uuid = sidedata_uuid
138 self._sidedata_uuid = sidedata_uuid
144 self._older_sidedata_uuids = older_sidedata_uuids
139 self._older_sidedata_uuids = older_sidedata_uuids
145 assert not set(older_index_uuids) & set(older_data_uuids)
140 assert not set(older_index_uuids) & set(older_data_uuids)
146 assert not set(older_data_uuids) & set(older_sidedata_uuids)
141 assert not set(older_data_uuids) & set(older_sidedata_uuids)
147 assert not set(older_index_uuids) & set(older_sidedata_uuids)
142 assert not set(older_index_uuids) & set(older_sidedata_uuids)
148 # thes asserts should be True as long as we have a single index filename
143 # thes asserts should be True as long as we have a single index filename
149 assert index_end <= pending_index_end
144 assert index_end <= pending_index_end
150 assert data_end <= pending_data_end
145 assert data_end <= pending_data_end
151 assert sidedata_end <= pending_sidedata_end
146 assert sidedata_end <= pending_sidedata_end
152 self._initial_index_end = index_end
147 self._initial_index_end = index_end
153 self._pending_index_end = pending_index_end
148 self._pending_index_end = pending_index_end
154 self._initial_data_end = data_end
149 self._initial_data_end = data_end
155 self._pending_data_end = pending_data_end
150 self._pending_data_end = pending_data_end
156 self._initial_sidedata_end = sidedata_end
151 self._initial_sidedata_end = sidedata_end
157 self._pending_sidedata_end = pending_sidedata_end
152 self._pending_sidedata_end = pending_sidedata_end
158 if use_pending:
153 if use_pending:
159 self._index_end = self._pending_index_end
154 self._index_end = self._pending_index_end
160 self._data_end = self._pending_data_end
155 self._data_end = self._pending_data_end
161 self._sidedata_end = self._pending_sidedata_end
156 self._sidedata_end = self._pending_sidedata_end
162 else:
157 else:
163 self._index_end = self._initial_index_end
158 self._index_end = self._initial_index_end
164 self._data_end = self._initial_data_end
159 self._data_end = self._initial_data_end
165 self._sidedata_end = self._initial_sidedata_end
160 self._sidedata_end = self._initial_sidedata_end
166 self.default_compression_header = default_compression_header
161 self.default_compression_header = default_compression_header

    def index_filepath(self):
        """file path to the current index file associated to this docket"""
        # very simplistic version at first
        if self._index_uuid is None:
            self._index_uuid = make_uid()
        return b"%s-%s.idx" % (self._radix, self._index_uuid)

    def new_index_file(self):
        """switch index file to a new UID

        The previous index UID is moved to the "older" list."""
        old = (self._index_uuid, self._index_end)
        self._older_index_uuids.insert(0, old)
        self._index_uuid = make_uid()
        return self.index_filepath()

    def old_index_filepaths(self, include_empty=True):
        """yield file paths to older index files associated to this docket"""
        # very simplistic version at first
        for uuid, size in self._older_index_uuids:
            if include_empty or size > 0:
                yield b"%s-%s.idx" % (self._radix, uuid)

    def data_filepath(self):
        """file path to the current data file associated to this docket"""
        # very simplistic version at first
        if self._data_uuid is None:
            self._data_uuid = make_uid()
        return b"%s-%s.dat" % (self._radix, self._data_uuid)

    def new_data_file(self):
        """switch data file to a new UID

        The previous data UID is moved to the "older" list."""
        old = (self._data_uuid, self._data_end)
        self._older_data_uuids.insert(0, old)
        self._data_uuid = make_uid()
        return self.data_filepath()

    def old_data_filepaths(self, include_empty=True):
        """yield file paths to older data files associated to this docket"""
        # very simplistic version at first
        for uuid, size in self._older_data_uuids:
            if include_empty or size > 0:
                yield b"%s-%s.dat" % (self._radix, uuid)

    def sidedata_filepath(self):
        """file path to the current sidedata file associated to this docket"""
        # very simplistic version at first
        if self._sidedata_uuid is None:
            self._sidedata_uuid = make_uid()
        return b"%s-%s.sda" % (self._radix, self._sidedata_uuid)

    def new_sidedata_file(self):
        """switch sidedata file to a new UID

        The previous sidedata UID is moved to the "older" list."""
        old = (self._sidedata_uuid, self._sidedata_end)
        self._older_sidedata_uuids.insert(0, old)
        self._sidedata_uuid = make_uid()
        return self.sidedata_filepath()

    def old_sidedata_filepaths(self, include_empty=True):
        """yield file paths to older sidedata files associated to this docket"""
        # very simplistic version at first
        for uuid, size in self._older_sidedata_uuids:
            if include_empty or size > 0:
                yield b"%s-%s.sda" % (self._radix, uuid)

    @property
    def index_end(self):
        return self._index_end

    @index_end.setter
    def index_end(self, new_size):
        if new_size != self._index_end:
            self._index_end = new_size
            self._dirty = True

    @property
    def data_end(self):
        return self._data_end

    @data_end.setter
    def data_end(self, new_size):
        if new_size != self._data_end:
            self._data_end = new_size
            self._dirty = True

    @property
    def sidedata_end(self):
        return self._sidedata_end

    @sidedata_end.setter
    def sidedata_end(self, new_size):
        if new_size != self._sidedata_end:
            self._sidedata_end = new_size
            self._dirty = True
    def write(self, transaction, pending=False, stripping=False):
        """write the modifications to disk, if any

        This makes the new content visible to all processes"""
        if not self._dirty:
            return False
        else:
            if self._read_only:
                msg = b'writing read-only docket: %s'
                msg %= self._path
                raise error.ProgrammingError(msg)
            if not stripping:
                # XXX we could leverage the docket while stripping. However
                # it is not powerful enough at the time of this comment
                transaction.addbackup(self._path, location=b'store')
            with self._opener(self._path, mode=b'w', atomictemp=True) as f:
                f.write(self._serialize(pending=pending))
            # if pending, we still need to write the final data eventually
            self._dirty = pending
            return True

    def _serialize(self, pending=False):
        if pending:
            official_index_end = self._initial_index_end
            official_data_end = self._initial_data_end
            official_sidedata_end = self._initial_sidedata_end
        else:
            official_index_end = self._index_end
            official_data_end = self._data_end
            official_sidedata_end = self._sidedata_end

        # these asserts should be True as long as we have a single index filename
        assert official_data_end <= self._data_end
        assert official_sidedata_end <= self._sidedata_end
        data = (
            self._version_header,
            len(self._index_uuid),
            len(self._older_index_uuids),
            len(self._data_uuid),
            len(self._older_data_uuids),
            len(self._sidedata_uuid),
            len(self._older_sidedata_uuids),
            official_index_end,
            self._index_end,
            official_data_end,
            self._data_end,
            official_sidedata_end,
            self._sidedata_end,
            self.default_compression_header,
        )
        s = []
        s.append(S_HEADER.pack(*data))

        s.append(self._index_uuid)
        for u, size in self._older_index_uuids:
            s.append(S_OLD_UID.pack(len(u), size))
        for u, size in self._older_index_uuids:
            s.append(u)

        s.append(self._data_uuid)
        for u, size in self._older_data_uuids:
            s.append(S_OLD_UID.pack(len(u), size))
        for u, size in self._older_data_uuids:
            s.append(u)

        s.append(self._sidedata_uuid)
        for u, size in self._older_sidedata_uuids:
            s.append(S_OLD_UID.pack(len(u), size))
        for u, size in self._older_sidedata_uuids:
            s.append(u)
        return b''.join(s)


def default_docket(revlog, version_header):
    """given a revlog version, return a new docket object for the given revlog"""
    rl_version = version_header & 0xFFFF
    if rl_version not in (constants.REVLOGV2, constants.CHANGELOGV2):
        return None
    comp = util.compengines[revlog._compengine].revlogheader()
    docket = RevlogDocket(
        revlog,
        version_header=version_header,
        default_compression_header=comp,
    )
    docket._dirty = True
    return docket
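
# For example (hypothetical header value), the revlog version lives in the
# low 16 bits of the version header, hence the mask above:
#
#     >>> 0x12340001 & 0xFFFF
#     1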


def _parse_old_uids(get_data, count):
    all_sizes = []
    all_uids = []
    for i in range(0, count):
        raw = get_data(S_OLD_UID.size)
        all_sizes.append(S_OLD_UID.unpack(raw))

    for uid_size, file_size in all_sizes:
        uid = get_data(uid_size)
        all_uids.append((uid, file_size))
    return all_uids
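
# A sketch of the byte layout this parser expects: all (uid_size, file_size)
# records come first, then the uid byte strings back to back, matching what
# _serialize() writes above. The values below are made up:
#
#     >>> blob = S_OLD_UID.pack(4, 100) + S_OLD_UID.pack(4, 0) + b'aaaabbbb'
#     >>> pos = [0]
#     >>> def get_data(size):
#     ...     raw = blob[pos[0] : pos[0] + size]
#     ...     pos[0] += size
#     ...     return raw
#     >>> _parse_old_uids(get_data, 2)
#     [(b'aaaa', 100), (b'bbbb', 0)]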


def parse_docket(revlog, data, use_pending=False):
    """given some docket data, return a docket object for the given revlog"""
    header = S_HEADER.unpack(data[: S_HEADER.size])

    # this is a mutable closure capture used in `get_data`
    offset = [S_HEADER.size]

    def get_data(size):
        """utility closure to access the `size` next bytes"""
        if offset[0] + size > len(data):
            # XXX better class
            msg = b"docket is too short, expected %d got %d"
            msg %= (offset[0] + size, len(data))
            raise error.Abort(msg)
        raw = data[offset[0] : offset[0] + size]
        offset[0] += size
        return raw

    iheader = iter(header)

    version_header = next(iheader)

    index_uuid_size = next(iheader)
    index_uuid = get_data(index_uuid_size)

    older_index_uuid_count = next(iheader)
    older_index_uuids = _parse_old_uids(get_data, older_index_uuid_count)

    data_uuid_size = next(iheader)
    data_uuid = get_data(data_uuid_size)

    older_data_uuid_count = next(iheader)
    older_data_uuids = _parse_old_uids(get_data, older_data_uuid_count)

    sidedata_uuid_size = next(iheader)
    sidedata_uuid = get_data(sidedata_uuid_size)

    older_sidedata_uuid_count = next(iheader)
    older_sidedata_uuids = _parse_old_uids(get_data, older_sidedata_uuid_count)

    index_size = next(iheader)

    pending_index_size = next(iheader)

    data_size = next(iheader)

    pending_data_size = next(iheader)

    sidedata_size = next(iheader)

    pending_sidedata_size = next(iheader)

    default_compression_header = next(iheader)

    docket = RevlogDocket(
        revlog,
        use_pending=use_pending,
        version_header=version_header,
        index_uuid=index_uuid,
        older_index_uuids=older_index_uuids,
        data_uuid=data_uuid,
        older_data_uuids=older_data_uuids,
        sidedata_uuid=sidedata_uuid,
        older_sidedata_uuids=older_sidedata_uuids,
        index_end=index_size,
        pending_index_end=pending_index_size,
        data_end=data_size,
        pending_data_end=pending_data_size,
        sidedata_end=sidedata_size,
        pending_sidedata_end=pending_sidedata_size,
        default_compression_header=default_compression_header,
    )
    return docket
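

# A hedged end-to-end sketch: serialize a fresh docket and parse it back on
# a stub revlog. The stub attributes mirror what RevlogDocket reads from its
# ``revlog`` argument above; the docket file name and compression byte are
# made up, and the private helpers are poked purely for illustration:
def _docket_roundtrip_sketch():
    class _StubRevlog(object):
        radix = b'sketch'
        _docket_file = b'sketch.docket'  # hypothetical name
        opener = None  # only needed by write(), which we never call

    rl = _StubRevlog()
    docket = RevlogDocket(rl, version_header=constants.REVLOGV2)
    # the *_filepath() calls lazily allocate the three uuids that
    # _serialize() requires
    docket.index_filepath()
    docket.data_filepath()
    docket.sidedata_filepath()
    docket.default_compression_header = b'u'  # any 1-byte value packs fine
    blob = docket._serialize()
    parsed = parse_docket(rl, blob)
    assert parsed._index_uuid == docket._index_uuid
    assert parsed.index_end == 0
    return parsed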