##// END OF EJS Templates
revlog: use `rev` instead of `i` in replace_sidedata_info...
marmoute -
r48017:3b04cf97 default
parent child Browse files
Show More
@@ -1,404 +1,404
1 1 # parsers.py - Python implementation of parsers.c
2 2 #
3 3 # Copyright 2009 Olivia Mackall <olivia@selenic.com> and others
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import struct
11 11 import zlib
12 12
13 13 from ..node import (
14 14 nullrev,
15 15 sha1nodeconstants,
16 16 )
17 17 from .. import (
18 18 error,
19 19 pycompat,
20 20 util,
21 21 )
22 22
23 23 from ..revlogutils import nodemap as nodemaputil
24 24 from ..revlogutils import constants as revlog_constants
25 25
26 26 stringio = pycompat.bytesio
27 27
28 28
29 29 _pack = struct.pack
30 30 _unpack = struct.unpack
31 31 _compress = zlib.compress
32 32 _decompress = zlib.decompress
33 33
34 34 # Some code below makes tuples directly because it's more convenient. However,
35 35 # code outside this module should always use dirstatetuple.
def dirstatetuple(*x):
    """Build a dirstate entry tuple from its positional fields.

    Code outside this module should always use this helper instead of
    creating the tuple directly.
    """
    return x
39 39
40 40
def gettype(q):
    """Extract the type flags (low 16 bits) of a packed offset/type value."""
    return int(q & 0xFFFF)
43 43
44 44
def offset_type(offset, type):
    """Pack ``offset`` and ``type`` flags into one integer (offset << 16 | type)."""
    return int(int(offset) << 16 | type)
47 47
48 48
class BaseIndexObject(object):
    """Pure-Python implementation of a revlog index (mirror of parsers.c).

    Concrete subclasses must provide:
    - ``_data``: raw bytes backing the on-disk entries
    - ``_lgt``: number of entries stored in ``_data``
    - ``_extra``: list of packed entries appended in memory only
    - ``_calculate_index(i)``: byte offset of entry ``i`` inside ``_data``
    """

    # Format of an index entry according to Python's `struct` language
    index_format = revlog_constants.INDEX_ENTRY_V1
    # Size of a C unsigned long long int, platform independent
    big_int_size = struct.calcsize(b'>Q')
    # Size of a C long int, platform independent
    int_size = struct.calcsize(b'>i')
    # An empty index entry, used as a default value to be overridden, or nullrev
    null_item = (0, 0, 0, -1, -1, -1, -1, sha1nodeconstants.nullid, 0, 0)

    @util.propertycache
    def entry_size(self):
        """Size in bytes of one packed index entry."""
        return self.index_format.size

    @property
    def nodemap(self):
        # deprecated public accessor kept for backward compatibility
        msg = b"index.nodemap is deprecated, use index.[has_node|rev|get_rev]"
        util.nouideprecwarn(msg, b'5.3', stacklevel=2)
        return self._nodemap

    @util.propertycache
    def _nodemap(self):
        # lazily build the node -> rev mapping by scanning the whole index
        nodemap = nodemaputil.NodeMap({sha1nodeconstants.nullid: nullrev})
        for r in range(0, len(self)):
            n = self[r][7]
            nodemap[n] = r
        return nodemap

    def has_node(self, node):
        """return True if the node exist in the index"""
        return node in self._nodemap

    def rev(self, node):
        """return a revision for a node

        If the node is unknown, raise a RevlogError"""
        return self._nodemap[node]

    def get_rev(self, node):
        """return a revision for a node

        If the node is unknown, return None"""
        return self._nodemap.get(node)

    def _stripnodes(self, start):
        # drop stripped revisions from the cached nodemap, if it was built
        if '_nodemap' in vars(self):
            for r in range(start, len(self)):
                n = self[r][7]
                del self._nodemap[n]

    def clearcaches(self):
        """Forget any cached data (currently only the nodemap)."""
        self.__dict__.pop('_nodemap', None)

    def __len__(self):
        return self._lgt + len(self._extra)

    def append(self, tup):
        # keep the cached nodemap in sync if it has been materialized
        if '_nodemap' in vars(self):
            self._nodemap[tup[7]] = len(self)
        data = self._pack_entry(tup)
        self._extra.append(data)

    def _pack_entry(self, entry):
        # v1 entries cannot carry sidedata information (fields 8 and 9)
        assert entry[8] == 0
        assert entry[9] == 0
        return self.index_format.pack(*entry[:8])

    def _check_index(self, i):
        if not isinstance(i, int):
            raise TypeError(b"expecting int indexes")
        if i < 0 or i >= len(self):
            raise IndexError

    def __getitem__(self, i):
        if i == -1:
            return self.null_item
        self._check_index(i)
        if i >= self._lgt:
            data = self._extra[i - self._lgt]
        else:
            index = self._calculate_index(i)
            data = self._data[index : index + self.entry_size]
        r = self._unpack_entry(data)
        if self._lgt and i == 0:
            # on disk, the first entry's offset field doubles as the version
            # header; mask it out so callers see a zero offset
            r = (offset_type(0, gettype(r[0])),) + r[1:]
        return r

    def _unpack_entry(self, data):
        r = self.index_format.unpack(data)
        # v1 entries carry no sidedata; pad with zero offset and length
        r = r + (0, 0)
        return r

    def pack_header(self, header):
        """pack header information as binary"""
        v_fmt = revlog_constants.INDEX_HEADER
        return v_fmt.pack(header)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V1.pack(*entry[:8])
        if rev == 0:
            # the version header overlays the first entry's offset on disk
            p = p[revlog_constants.INDEX_HEADER.size :]
        return p
153 153
154 154
class IndexObject(BaseIndexObject):
    """Index backed by a non-inline revlog: ``data`` is the full index file."""

    def __init__(self, data):
        # the file must contain a whole number of fixed-size entries
        assert len(data) % self.entry_size == 0, (
            len(data),
            self.entry_size,
            len(data) % self.entry_size,
        )
        self._data = data
        self._lgt = len(data) // self.entry_size
        self._extra = []

    def _calculate_index(self, i):
        # fixed-size entries: offset is a simple multiplication
        return i * self.entry_size

    def __delitem__(self, i):
        # only stripping from a start revision to the end is supported
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            # truncate on-disk data and drop all in-memory entries
            self._data = self._data[: i * self.entry_size]
            self._lgt = i
            self._extra = []
        else:
            # strip only affects entries appended in memory
            self._extra = self._extra[: i - self._lgt]
181 181
182 182
class PersistentNodeMapIndexObject(IndexObject):
    """a Debug oriented class to test persistent nodemap

    We need a simple python object to test API and higher level behavior. See
    the Rust implementation for more serious usage. This should be used only
    through the dedicated `devel.persistent-nodemap` config.
    """

    def nodemap_data_all(self):
        """Return bytes containing a full serialization of a nodemap

        The nodemap should be valid for the full set of revisions in the
        index."""
        return nodemaputil.persistent_data(self)

    def nodemap_data_incremental(self):
        """Return bytes containing an incremental update to persistent nodemap

        This contains the data for an append-only update of the data provided
        in the last call to `update_nodemap_data`.
        """
        if self._nm_root is None:
            return None
        docket = self._nm_docket
        changed, data = nodemaputil.update_persistent_data(
            self, self._nm_root, self._nm_max_idx, self._nm_docket.tip_rev
        )

        # the pending state has been consumed; reset it
        self._nm_root = self._nm_max_idx = self._nm_docket = None
        return docket, changed, data

    def update_nodemap_data(self, docket, nm_data):
        """provide full block of persisted binary data for a nodemap

        The data are expected to come from disk. See `nodemap_data_all` for a
        producer of such data."""
        if nm_data is not None:
            self._nm_root, self._nm_max_idx = nodemaputil.parse_data(nm_data)
            if self._nm_root:
                self._nm_docket = docket
            else:
                self._nm_root = self._nm_max_idx = self._nm_docket = None
225 225
226 226
class InlinedIndexObject(BaseIndexObject):
    """Index backed by an inline revlog: each index entry is immediately
    followed by the revision data it describes, so entry offsets vary."""

    def __init__(self, data, inline=0):
        self._data = data
        # first pass counts the entries, second pass records their offsets
        self._lgt = self._inline_scan(None)
        self._inline_scan(self._lgt)
        self._extra = []

    def _inline_scan(self, lgt):
        """Walk the inline data and return the number of entries.

        When ``lgt`` is not None, also fill ``self._offsets`` with the byte
        offset of each entry.
        """
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            # the compressed data length is stored right after the
            # 8-byte offset/flags field
            start = off + self.big_int_size
            (s,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            off += self.entry_size + s
        if off != len(self._data):
            raise ValueError(b"corrupted data")
        return count

    def __delitem__(self, i):
        # only stripping from a start revision to the end is supported
        if not isinstance(i, slice) or not i.stop == -1 or i.step is not None:
            raise ValueError(b"deleting slices only supports a:-1 with step 1")
        i = i.start
        self._check_index(i)
        self._stripnodes(i)
        if i < self._lgt:
            self._offsets = self._offsets[:i]
            self._lgt = i
            self._extra = []
        else:
            self._extra = self._extra[: i - self._lgt]

    def _calculate_index(self, i):
        # offsets were precomputed by _inline_scan
        return self._offsets[i]
268 268
269 269
def parse_index2(data, inline, revlogv2=False):
    """Parse raw index ``data`` and return an ``(index, cache)`` pair.

    ``cache`` is ``None`` for non-inline revlogs, and ``(0, data)`` for
    inline ones (the same buffer also holds the revision chunks).
    """
    if not inline:
        cls = IndexObject2 if revlogv2 else IndexObject
        return cls(data), None
    cls = InlinedIndexObject2 if revlogv2 else InlinedIndexObject
    return cls(data, inline), (0, data)
276 276
277 277
class Index2Mixin(object):
    """Mixin switching an index class to the revlog-v2 entry format,
    whose entries carry sidedata offset/length fields."""

    index_format = revlog_constants.INDEX_ENTRY_V2

    def replace_sidedata_info(
        self, rev, sidedata_offset, sidedata_length, offset_flags
    ):
        """
        Replace an existing index entry's sidedata offset and length with new
        ones.
        This cannot be used outside of the context of sidedata rewriting,
        inside the transaction that creates the revision `rev`.
        """
        if rev < 0:
            raise KeyError
        self._check_index(rev)
        sidedata_format = b">Qi"
        packed_size = struct.calcsize(sidedata_format)
        if rev >= self._lgt:
            packed = _pack(sidedata_format, sidedata_offset, sidedata_length)
            old = self._extra[rev - self._lgt]
            offset_flags = struct.pack(b">Q", offset_flags)
            # splice the new offset/flags and sidedata fields into the old
            # packed entry; bytes 8-63 (and the tail) are kept as-is
            # NOTE(review): byte positions assume the v2 entry layout —
            # confirm against revlog_constants.INDEX_ENTRY_V2
            new = offset_flags + old[8:64] + packed + old[64 + packed_size :]
            self._extra[rev - self._lgt] = new
        else:
            # entries already flushed to disk cannot be rewritten here
            msg = b"cannot rewrite entries outside of this transaction"
            raise KeyError(msg)

    def _unpack_entry(self, data):
        # v2 entries already include the sidedata fields; no padding needed
        return self.index_format.unpack(data)

    def _pack_entry(self, entry):
        return self.index_format.pack(*entry)

    def entry_binary(self, rev):
        """return the raw binary string representing a revision"""
        entry = self[rev]
        p = revlog_constants.INDEX_ENTRY_V2.pack(*entry)
        return p

    def pack_header(self, header):
        """pack header information as binary"""
        msg = 'version header should go in the docket, not the index: %d'
        msg %= header
        raise error.ProgrammingError(msg)
322 322
323 323
class IndexObject2(Index2Mixin, IndexObject):
    """Non-inline revlog index using the v2 entry format."""
326 326
327 327
class InlinedIndexObject2(Index2Mixin, InlinedIndexObject):
    """Inline revlog index using the v2 entry format."""

    def _inline_scan(self, lgt):
        """Walk the inline data and return the number of entries.

        Unlike the v1 scan, each entry is followed by both its revision
        data and its sidedata, so both lengths must be skipped.
        """
        # byte position of the sidedata length field within a v2 entry
        # NOTE(review): assumes the INDEX_ENTRY_V2 layout — confirm
        sidedata_length_pos = 72
        off = 0
        if lgt is not None:
            self._offsets = [0] * lgt
        count = 0
        while off <= len(self._data) - self.entry_size:
            start = off + self.big_int_size
            (data_size,) = struct.unpack(
                b'>i',
                self._data[start : start + self.int_size],
            )
            start = off + sidedata_length_pos
            (side_data_size,) = struct.unpack(
                b'>i', self._data[start : start + self.int_size]
            )
            if lgt is not None:
                self._offsets[count] = off
            count += 1
            off += self.entry_size + data_size + side_data_size
        if off != len(self._data):
            raise ValueError(b"corrupted data")
        return count
352 352
353 353
def parse_index_devel_nodemap(data, inline):
    """like parse_index2, but always return a PersistentNodeMapIndexObject"""
    return PersistentNodeMapIndexObject(data), None
357 357
358 358
def parse_dirstate(dmap, copymap, st):
    """Parse dirstate file contents ``st`` into ``dmap`` and ``copymap``.

    The first 40 bytes are the two parent nodes.  Each following record is
    a ``>cllll`` header (state, mode, size, mtime, filename length) followed
    by the filename; a NUL in the filename separates it from its copy
    source, which is recorded in ``copymap``.

    Returns the list of the two parent nodes.
    """
    parents = [st[:20], st[20:40]]
    # dereference fields so they will be local in loop
    entry_struct = struct.Struct(b">cllll")
    unpack = entry_struct.unpack
    e_size = entry_struct.size
    pos1 = 40
    l = len(st)

    # the inner loop
    while pos1 < l:
        pos2 = pos1 + e_size
        e = unpack(st[pos1:pos2])
        pos1 = pos2 + e[4]
        f = st[pos2:pos1]
        if b'\0' in f:
            # "filename\0copysource" records a copy
            f, c = f.split(b'\0')
            copymap[f] = c
        dmap[f] = e[:4]
    return parents
378 378
379 379
def pack_dirstate(dmap, copymap, pl, now):
    """Serialize ``dmap``/``copymap`` into dirstate file format.

    ``pl`` is the pair of parent nodes, written first.  Each entry is
    packed as ``>cllll`` (state, mode, size, mtime, filename length)
    followed by the filename (with its copy source appended after a NUL
    when present).

    Side effect: entries whose mtime equals ``now`` have their mtime
    invalidated (set to -1) in ``dmap`` itself, see comment below.
    """
    now = int(now)
    pack = struct.pack
    parts = [b"".join(pl)]
    for f, e in dmap.items():
        if e[0] == b'n' and e[3] == now:
            # The file was last modified "simultaneously" with the current
            # write to dirstate (i.e. within the same second for file-
            # systems with a granularity of 1 sec). This commonly happens
            # for at least a couple of files on 'update'.
            # The user could change the file without changing its size
            # within the same second. Invalidate the file's mtime in
            # dirstate, forcing future 'status' calls to compare the
            # contents of the file if the size is the same. This prevents
            # mistakenly treating such files as clean.
            e = (e[0], e[1], e[2], -1)
            dmap[f] = e

        if f in copymap:
            # record the copy source after a NUL separator
            f = b"%s\0%s" % (f, copymap[f])
        parts.append(pack(b">cllll", e[0], e[1], e[2], e[3], len(f)))
        parts.append(f)
    return b"".join(parts)
General Comments 0
You need to be logged in to leave comments. Login now