revlogutils: unconditionally pass version to random seed...
Gregory Szorc
r49764:0aae0e2e default
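Context for the change: Python 2's random.Random.seed() did not accept a
`version` argument, so the test-only uid helper had to branch on
pycompat.ispy3. With Python 2 support gone, the call can pass version=1
unconditionally and both the branch and the pycompat import can be dropped.
A minimal standalone sketch of the resulting behavior (illustration only,
not part of the patch; the 0x04 seed value is arbitrary):

    import random

    int_seed = 0x04
    r1 = random.Random()
    r1.seed(int_seed, version=1)  # `version` is accepted unconditionally on Python 3
    r2 = random.Random()
    r2.seed(int_seed, version=1)
    # identical integer seeds yield identical streams, which is what the
    # HGTEST_UUIDFILE helper in this module relies on for stable test output
    assert r1.getrandbits(32) == r2.getrandbits(32)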
@@ -1,435 +1,431 @@
 # docket - code related to revlog "docket"
 #
 # Copyright 2021 Pierre-Yves David <pierre-yves.david@octobus.net>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.
 
 ### Revlog docket file
 #
 # The revlog is stored on disk using multiple files:
 #
 # * a small docket file, containing metadata and a pointer,
 #
 # * an index file, containing fixed width information about revisions,
 #
 # * a data file, containing variable width data for these revisions,
 
 
 import errno
 import os
 import random
 import struct
 
 from .. import (
     encoding,
     error,
     node,
-    pycompat,
     util,
 )
 
 from . import (
     constants,
 )
 
 
 def make_uid(id_size=8):
     """return a new unique identifier.
 
     The identifier is random and composed of ascii characters."""
     # since we "hex" the result we need half the number of bytes to have a
     # final uuid of size ID_SIZE
     return node.hex(os.urandom(id_size // 2))
 
 
 # some special test logic to avoid annoying random output in the tests
 stable_docket_file = encoding.environ.get(b'HGTEST_UUIDFILE')
 
 if stable_docket_file:
 
     def make_uid(id_size=8):
         try:
             with open(stable_docket_file, mode='rb') as f:
                 seed = f.read().strip()
         except IOError as inst:
             if inst.errno != errno.ENOENT:
                 raise
             seed = b'04'  # chosen by a fair dice roll. guaranteed to be random
         iter_seed = iter(seed)
         # some basic circular sum hashing on 64 bits
         int_seed = 0
         low_mask = int('1' * 35, 2)
         for i in iter_seed:
             high_part = int_seed >> 35
             low_part = (int_seed & low_mask) << 28
             int_seed = high_part + low_part + i
         r = random.Random()
-        if pycompat.ispy3:
-            r.seed(int_seed, version=1)
-        else:
-            r.seed(int_seed)
+        r.seed(int_seed, version=1)
         # once we drop python 3.8 support we can simply use r.randbytes
         raw = r.getrandbits(id_size * 4)
         assert id_size == 8
         p = struct.pack('>L', raw)
         new = node.hex(p)
         with open(stable_docket_file, 'wb') as f:
             f.write(new)
         return new
 
 
 # Docket format
 #
 # * 4 bytes: revlog version
 # |   This is mandatory as the docket must be compatible with the previous
 # |   revlog index header.
 # * 1 byte: size of index uuid
 # * 1 byte: number of outdated index uuids
 # * 1 byte: size of data uuid
 # * 1 byte: number of outdated data uuids
 # * 1 byte: size of sidedata uuid
 # * 1 byte: number of outdated sidedata uuids
 # * 4 bytes: size of index-data
 # * 4 bytes: pending size of index-data
 # * 4 bytes: size of data
 # * 4 bytes: pending size of data
 # * 4 bytes: size of sidedata
 # * 4 bytes: pending size of sidedata
 # * 1 byte: default compression header
 S_HEADER = struct.Struct(constants.INDEX_HEADER_FMT + b'BBBBBBLLLLLLc')
 # * 1 byte: size of uuid
 # * 4 bytes: size of file
 S_OLD_UID = struct.Struct('>BL')
 
 
 class RevlogDocket(object):
     """metadata associated with revlog"""
 
     def __init__(
         self,
         revlog,
         use_pending=False,
         version_header=None,
         index_uuid=None,
         older_index_uuids=(),
         data_uuid=None,
         older_data_uuids=(),
         sidedata_uuid=None,
         older_sidedata_uuids=(),
         index_end=0,
         pending_index_end=0,
         data_end=0,
         pending_data_end=0,
         sidedata_end=0,
         pending_sidedata_end=0,
         default_compression_header=None,
     ):
         self._version_header = version_header
         self._read_only = bool(use_pending)
         self._dirty = False
         self._radix = revlog.radix
         self._path = revlog._docket_file
         self._opener = revlog.opener
         self._index_uuid = index_uuid
         self._older_index_uuids = older_index_uuids
         self._data_uuid = data_uuid
         self._older_data_uuids = older_data_uuids
         self._sidedata_uuid = sidedata_uuid
         self._older_sidedata_uuids = older_sidedata_uuids
         assert not set(older_index_uuids) & set(older_data_uuids)
         assert not set(older_data_uuids) & set(older_sidedata_uuids)
         assert not set(older_index_uuids) & set(older_sidedata_uuids)
         # these asserts should be True as long as we have a single index filename
         assert index_end <= pending_index_end
         assert data_end <= pending_data_end
         assert sidedata_end <= pending_sidedata_end
         self._initial_index_end = index_end
         self._pending_index_end = pending_index_end
         self._initial_data_end = data_end
         self._pending_data_end = pending_data_end
         self._initial_sidedata_end = sidedata_end
         self._pending_sidedata_end = pending_sidedata_end
         if use_pending:
             self._index_end = self._pending_index_end
             self._data_end = self._pending_data_end
             self._sidedata_end = self._pending_sidedata_end
         else:
             self._index_end = self._initial_index_end
             self._data_end = self._initial_data_end
             self._sidedata_end = self._initial_sidedata_end
         self.default_compression_header = default_compression_header
 
     def index_filepath(self):
         """file path to the current index file associated to this docket"""
         # very simplistic version at first
         if self._index_uuid is None:
             self._index_uuid = make_uid()
         return b"%s-%s.idx" % (self._radix, self._index_uuid)
 
     def new_index_file(self):
         """switch index file to a new UID
 
         The previous index UID is moved to the "older" list."""
         old = (self._index_uuid, self._index_end)
         self._older_index_uuids.insert(0, old)
         self._index_uuid = make_uid()
         return self.index_filepath()
 
     def old_index_filepaths(self, include_empty=True):
         """yield file path to older index files associated to this docket"""
         # very simplistic version at first
         for uuid, size in self._older_index_uuids:
             if include_empty or size > 0:
                 yield b"%s-%s.idx" % (self._radix, uuid)
 
     def data_filepath(self):
         """file path to the current data file associated to this docket"""
         # very simplistic version at first
         if self._data_uuid is None:
             self._data_uuid = make_uid()
         return b"%s-%s.dat" % (self._radix, self._data_uuid)
 
     def new_data_file(self):
         """switch data file to a new UID
 
         The previous data UID is moved to the "older" list."""
         old = (self._data_uuid, self._data_end)
         self._older_data_uuids.insert(0, old)
         self._data_uuid = make_uid()
         return self.data_filepath()
 
     def old_data_filepaths(self, include_empty=True):
         """yield file path to older data files associated to this docket"""
         # very simplistic version at first
         for uuid, size in self._older_data_uuids:
             if include_empty or size > 0:
                 yield b"%s-%s.dat" % (self._radix, uuid)
 
     def sidedata_filepath(self):
         """file path to the current sidedata file associated to this docket"""
         # very simplistic version at first
         if self._sidedata_uuid is None:
             self._sidedata_uuid = make_uid()
         return b"%s-%s.sda" % (self._radix, self._sidedata_uuid)
 
     def new_sidedata_file(self):
         """switch sidedata file to a new UID
 
         The previous sidedata UID is moved to the "older" list."""
         old = (self._sidedata_uuid, self._sidedata_end)
         self._older_sidedata_uuids.insert(0, old)
         self._sidedata_uuid = make_uid()
         return self.sidedata_filepath()
 
     def old_sidedata_filepaths(self, include_empty=True):
         """yield file path to older sidedata files associated to this docket"""
         # very simplistic version at first
         for uuid, size in self._older_sidedata_uuids:
             if include_empty or size > 0:
                 yield b"%s-%s.sda" % (self._radix, uuid)
 
     @property
     def index_end(self):
         return self._index_end
 
     @index_end.setter
     def index_end(self, new_size):
         if new_size != self._index_end:
             self._index_end = new_size
             self._dirty = True
 
     @property
     def data_end(self):
         return self._data_end
 
     @data_end.setter
     def data_end(self, new_size):
         if new_size != self._data_end:
             self._data_end = new_size
             self._dirty = True
 
     @property
     def sidedata_end(self):
         return self._sidedata_end
 
     @sidedata_end.setter
     def sidedata_end(self, new_size):
         if new_size != self._sidedata_end:
             self._sidedata_end = new_size
             self._dirty = True
 
     def write(self, transaction, pending=False, stripping=False):
         """write the modifications to disk, if any
 
         This makes the new content visible to all processes"""
         if not self._dirty:
             return False
         else:
             if self._read_only:
                 msg = b'writing read-only docket: %s'
                 msg %= self._path
                 raise error.ProgrammingError(msg)
             if not stripping:
                 # XXX we could leverage the docket while stripping. However it
                 # is not powerful enough at the time of this comment
                 transaction.addbackup(self._path, location=b'store')
             with self._opener(self._path, mode=b'w', atomictemp=True) as f:
                 f.write(self._serialize(pending=pending))
             # if pending, we still need to write the final data eventually
             self._dirty = pending
             return True
 
     def _serialize(self, pending=False):
         if pending:
             official_index_end = self._initial_index_end
             official_data_end = self._initial_data_end
             official_sidedata_end = self._initial_sidedata_end
         else:
             official_index_end = self._index_end
             official_data_end = self._data_end
             official_sidedata_end = self._sidedata_end
 
         # these asserts should be True as long as we have a single index filename
         assert official_data_end <= self._data_end
         assert official_sidedata_end <= self._sidedata_end
         data = (
             self._version_header,
             len(self._index_uuid),
             len(self._older_index_uuids),
             len(self._data_uuid),
             len(self._older_data_uuids),
             len(self._sidedata_uuid),
             len(self._older_sidedata_uuids),
             official_index_end,
             self._index_end,
             official_data_end,
             self._data_end,
             official_sidedata_end,
             self._sidedata_end,
             self.default_compression_header,
         )
         s = []
         s.append(S_HEADER.pack(*data))
 
         s.append(self._index_uuid)
         for u, size in self._older_index_uuids:
             s.append(S_OLD_UID.pack(len(u), size))
         for u, size in self._older_index_uuids:
             s.append(u)
 
         s.append(self._data_uuid)
         for u, size in self._older_data_uuids:
             s.append(S_OLD_UID.pack(len(u), size))
         for u, size in self._older_data_uuids:
             s.append(u)
 
         s.append(self._sidedata_uuid)
         for u, size in self._older_sidedata_uuids:
             s.append(S_OLD_UID.pack(len(u), size))
         for u, size in self._older_sidedata_uuids:
             s.append(u)
         return b''.join(s)
 
 
 def default_docket(revlog, version_header):
     """given a revlog version header, return a new docket for the revlog"""
     rl_version = version_header & 0xFFFF
     if rl_version not in (constants.REVLOGV2, constants.CHANGELOGV2):
         return None
     comp = util.compengines[revlog._compengine].revlogheader()
     docket = RevlogDocket(
         revlog,
         version_header=version_header,
         default_compression_header=comp,
     )
     docket._dirty = True
     return docket
 
 
 def _parse_old_uids(get_data, count):
     all_sizes = []
     all_uids = []
     for i in range(0, count):
         raw = get_data(S_OLD_UID.size)
         all_sizes.append(S_OLD_UID.unpack(raw))
 
     for uid_size, file_size in all_sizes:
         uid = get_data(uid_size)
         all_uids.append((uid, file_size))
     return all_uids
 
 
 def parse_docket(revlog, data, use_pending=False):
     """given some docket data, return a docket object for the given revlog"""
     header = S_HEADER.unpack(data[: S_HEADER.size])
 
     # this is a mutable closure capture used in `get_data`
     offset = [S_HEADER.size]
 
     def get_data(size):
         """utility closure to access the `size` next bytes"""
         if offset[0] + size > len(data):
             # XXX better class
             msg = b"docket is too short, expected %d got %d"
             msg %= (offset[0] + size, len(data))
             raise error.Abort(msg)
         raw = data[offset[0] : offset[0] + size]
         offset[0] += size
         return raw
 
     iheader = iter(header)
 
     version_header = next(iheader)
 
     index_uuid_size = next(iheader)
     index_uuid = get_data(index_uuid_size)
 
     older_index_uuid_count = next(iheader)
     older_index_uuids = _parse_old_uids(get_data, older_index_uuid_count)
 
     data_uuid_size = next(iheader)
     data_uuid = get_data(data_uuid_size)
 
     older_data_uuid_count = next(iheader)
     older_data_uuids = _parse_old_uids(get_data, older_data_uuid_count)
 
     sidedata_uuid_size = next(iheader)
     sidedata_uuid = get_data(sidedata_uuid_size)
 
     older_sidedata_uuid_count = next(iheader)
     older_sidedata_uuids = _parse_old_uids(get_data, older_sidedata_uuid_count)
 
     index_size = next(iheader)
 
     pending_index_size = next(iheader)
 
     data_size = next(iheader)
 
     pending_data_size = next(iheader)
 
     sidedata_size = next(iheader)
 
     pending_sidedata_size = next(iheader)
 
     default_compression_header = next(iheader)
 
     docket = RevlogDocket(
         revlog,
         use_pending=use_pending,
         version_header=version_header,
         index_uuid=index_uuid,
         older_index_uuids=older_index_uuids,
         data_uuid=data_uuid,
         older_data_uuids=older_data_uuids,
         sidedata_uuid=sidedata_uuid,
         older_sidedata_uuids=older_sidedata_uuids,
         index_end=index_size,
         pending_index_end=pending_index_size,
         data_end=data_size,
         pending_data_end=pending_data_size,
         sidedata_end=sidedata_size,
         pending_sidedata_end=pending_sidedata_size,
         default_compression_header=default_compression_header,
     )
     return docket
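To make the fixed-width portion of the docket format above concrete, here is
a minimal standalone round-trip of the header struct (a sketch, not Mercurial
code; it assumes constants.INDEX_HEADER_FMT is the 4-byte big-endian b">I",
and all field values are made up):

    import struct

    # same layout as S_HEADER above, with INDEX_HEADER_FMT inlined as b'>I'
    S_HEADER = struct.Struct(b'>I' + b'BBBBBBLLLLLLc')

    fields = (
        0xDEAD0001,  # version header (made-up value)
        8, 0,        # index uuid size, outdated index uuid count
        8, 0,        # data uuid size, outdated data uuid count
        8, 0,        # sidedata uuid size, outdated sidedata uuid count
        64, 64,      # index size, pending index size
        128, 128,    # data size, pending data size
        0, 0,        # sidedata size, pending sidedata size
        b'u',        # default compression header
    )
    raw = S_HEADER.pack(*fields)
    assert S_HEADER.size == 35  # 4 + 6*1 + 6*4 + 1 bytes, no padding with '>'
    assert S_HEADER.unpack(raw) == fields

The variable-length uuids and the S_OLD_UID records follow this fixed header
in the serialized docket, as written by _serialize() and read back by
parse_docket() above.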