censor: extract the part about writing the other revision in a function...
Author: marmoute
Changeset: r48260:9b70aa7b (branch: default)
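The change is a mechanical extraction: the body of the "writing all subsequent revisions" loop in v2_censor moves into a new module-level helper, _rewrite_simple, and the loop now only forwards its context (the revlog, the old index, the tuple of open file handles, the revision number, the precomputed rewritten_entries map, and the temporary storage file). A rough, runnable sketch of the resulting call shape, using toy stand-ins rather than real revlog objects (the wrapper name below is invented for illustration):

def _rewrite_simple(revlog, old_index, all_files, rev, rewritten_entries, tmp_storage):
    """Toy stand-in: copy one untouched revision that follows the censored one."""
    entry = old_index[rev]
    source = "tmp_storage" if rev in rewritten_entries else "old data file"
    print("rev %d: copying %r from %s" % (rev, entry, source))


def _write_subsequent_revisions(rl, censor_rev, old_index, open_files,
                                rewritten_entries, tmp_storage):
    # the loop in v2_censor now only dispatches to the helper
    for rev in range(censor_rev + 1, len(old_index)):
        _rewrite_simple(rl, old_index, open_files, rev, rewritten_entries, tmp_storage)


_write_subsequent_revisions(
    rl=None,
    censor_rev=1,
    old_index=["entry-0", "entry-1", "entry-2", "entry-3"],
    open_files=(),
    rewritten_entries={2: ("delta-base", 0, 4, 0)},
    tmp_storage=None,
)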
@@ -270,72 +270,97 @@ def v2_censor(rl, tr, censornode, tombst
         tombstone,
     )
 
-    #### Writing all subsequent revisions
+    # Writing all subsequent revisions
     for rev in range(censor_rev + 1, len(old_index)):
-        entry = old_index[rev]
-        flags = entry[ENTRY_DATA_OFFSET] & 0xFFFF
-        old_data_offset = entry[ENTRY_DATA_OFFSET] >> 16
+        _rewrite_simple(
+            rl,
+            old_index,
+            open_files,
+            rev,
+            rewritten_entries,
+            tmp_storage,
+        )
+    docket.write(transaction=None, stripping=True)
+
 
-        if rev not in rewritten_entries:
-            old_data_file.seek(old_data_offset)
-            new_data_size = entry[ENTRY_DATA_COMPRESSED_LENGTH]
-            new_data = old_data_file.read(new_data_size)
-            data_delta_base = entry[ENTRY_DELTA_BASE]
-            d_comp_mode = entry[ENTRY_DATA_COMPRESSION_MODE]
-        else:
-            (
-                data_delta_base,
-                start,
-                end,
-                d_comp_mode,
-            ) = rewritten_entries[rev]
-            new_data_size = end - start
-            tmp_storage.seek(start)
-            new_data = tmp_storage.read(new_data_size)
-
-        # It might be faster to group continuous read/write operation,
-        # however, this is censor, an operation that is not focussed
-        # around stellar performance. So I have not written this
-        # optimisation yet.
-        new_data_offset = new_data_file.tell()
-        new_data_file.write(new_data)
+def _rewrite_simple(
+    revlog,
+    old_index,
+    all_files,
+    rev,
+    rewritten_entries,
+    tmp_storage,
+):
+    """append a normal revision to the index after the rewritten one(s)"""
+    (
+        old_data_file,
+        old_sidedata_file,
+        new_index_file,
+        new_data_file,
+        new_sidedata_file,
+    ) = all_files
+    entry = old_index[rev]
+    flags = entry[ENTRY_DATA_OFFSET] & 0xFFFF
+    old_data_offset = entry[ENTRY_DATA_OFFSET] >> 16
 
-        sidedata_size = entry[ENTRY_SIDEDATA_COMPRESSED_LENGTH]
-        new_sidedata_offset = new_sidedata_file.tell()
-        if 0 < sidedata_size:
-            old_sidedata_offset = entry[ENTRY_SIDEDATA_OFFSET]
-            old_sidedata_file.seek(old_sidedata_offset)
-            new_sidedata = old_sidedata_file.read(sidedata_size)
-            new_sidedata_file.write(new_sidedata)
+    if rev not in rewritten_entries:
+        old_data_file.seek(old_data_offset)
+        new_data_size = entry[ENTRY_DATA_COMPRESSED_LENGTH]
+        new_data = old_data_file.read(new_data_size)
+        data_delta_base = entry[ENTRY_DELTA_BASE]
+        d_comp_mode = entry[ENTRY_DATA_COMPRESSION_MODE]
+    else:
+        (
+            data_delta_base,
+            start,
+            end,
+            d_comp_mode,
+        ) = rewritten_entries[rev]
+        new_data_size = end - start
+        tmp_storage.seek(start)
+        new_data = tmp_storage.read(new_data_size)
 
-        data_uncompressed_length = entry[ENTRY_DATA_UNCOMPRESSED_LENGTH]
-        sd_com_mode = entry[ENTRY_SIDEDATA_COMPRESSION_MODE]
-        assert data_delta_base <= rev, (data_delta_base, rev)
+    # It might be faster to group continuous read/write operation,
+    # however, this is censor, an operation that is not focussed
+    # around stellar performance. So I have not written this
+    # optimisation yet.
+    new_data_offset = new_data_file.tell()
+    new_data_file.write(new_data)
 
-        new_entry = revlogutils.entry(
-            flags=flags,
-            data_offset=new_data_offset,
-            data_compressed_length=new_data_size,
-            data_uncompressed_length=data_uncompressed_length,
-            data_delta_base=data_delta_base,
-            link_rev=entry[ENTRY_LINK_REV],
-            parent_rev_1=entry[ENTRY_PARENT_1],
-            parent_rev_2=entry[ENTRY_PARENT_2],
-            node_id=entry[ENTRY_NODE_ID],
-            sidedata_offset=new_sidedata_offset,
-            sidedata_compressed_length=sidedata_size,
-            data_compression_mode=d_comp_mode,
-            sidedata_compression_mode=sd_com_mode,
-        )
-        rl.index.append(new_entry)
-        entry_bin = rl.index.entry_binary(rev)
-        new_index_file.write(entry_bin)
+    sidedata_size = entry[ENTRY_SIDEDATA_COMPRESSED_LENGTH]
+    new_sidedata_offset = new_sidedata_file.tell()
+    if 0 < sidedata_size:
+        old_sidedata_offset = entry[ENTRY_SIDEDATA_OFFSET]
+        old_sidedata_file.seek(old_sidedata_offset)
+        new_sidedata = old_sidedata_file.read(sidedata_size)
+        new_sidedata_file.write(new_sidedata)
+
+    data_uncompressed_length = entry[ENTRY_DATA_UNCOMPRESSED_LENGTH]
+    sd_com_mode = entry[ENTRY_SIDEDATA_COMPRESSION_MODE]
+    assert data_delta_base <= rev, (data_delta_base, rev)
 
-        docket.index_end = new_index_file.tell()
-        docket.data_end = new_data_file.tell()
-        docket.sidedata_end = new_sidedata_file.tell()
+    new_entry = revlogutils.entry(
+        flags=flags,
+        data_offset=new_data_offset,
+        data_compressed_length=new_data_size,
+        data_uncompressed_length=data_uncompressed_length,
+        data_delta_base=data_delta_base,
+        link_rev=entry[ENTRY_LINK_REV],
+        parent_rev_1=entry[ENTRY_PARENT_1],
+        parent_rev_2=entry[ENTRY_PARENT_2],
+        node_id=entry[ENTRY_NODE_ID],
+        sidedata_offset=new_sidedata_offset,
+        sidedata_compressed_length=sidedata_size,
+        data_compression_mode=d_comp_mode,
+        sidedata_compression_mode=sd_com_mode,
+    )
+    revlog.index.append(new_entry)
+    entry_bin = revlog.index.entry_binary(rev)
+    new_index_file.write(entry_bin)
 
-    docket.write(transaction=None, stripping=True)
+    revlog._docket.index_end = new_index_file.tell()
+    revlog._docket.data_end = new_data_file.tell()
+    revlog._docket.sidedata_end = new_sidedata_file.tell()
 
 
 def _rewrite_censor(
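One detail worth calling out from the extracted helper: the first index field (ENTRY_DATA_OFFSET) packs both the revision flags and the offset of the revision's data into a single integer, with the flags in the low 16 bits and the byte offset in the remaining high bits, which is why the helper applies & 0xFFFF and >> 16 to the same value. A small self-contained illustration of that packing (the flag value is a stand-in, not a real revlog flag):

EXAMPLE_FLAG = 1 << 3  # stand-in flag bit, for illustration only

def pack_offset_flags(offset, flags):
    # inverse of the two expressions at the top of _rewrite_simple
    return (offset << 16) | (flags & 0xFFFF)

def unpack_offset_flags(offset_flags):
    flags = offset_flags & 0xFFFF  # flags = entry[ENTRY_DATA_OFFSET] & 0xFFFF
    offset = offset_flags >> 16    # old_data_offset = entry[ENTRY_DATA_OFFSET] >> 16
    return offset, flags

packed = pack_offset_flags(0x1234, EXAMPLE_FLAG)
assert unpack_offset_flags(packed) == (0x1234, EXAMPLE_FLAG)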
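The other branch of the helper is driven by rewritten_entries: for each revision whose delta had to be recomputed it records the new delta base, the start and end offsets of the recomputed data parked in tmp_storage, and the compression mode, while revisions absent from the map are copied verbatim from the old data file. A minimal sketch of the consumer side of that contract, with a plain dict and an in-memory buffer standing in for the real structures:

import io

# stand-ins: rev -> (delta_base, start, end, compression_mode), mirroring the
# tuple unpacked in _rewrite_simple
tmp_storage = io.BytesIO()
start = tmp_storage.tell()
tmp_storage.write(b"recomputed-delta-bytes")
end = tmp_storage.tell()
rewritten_entries = {7: (5, start, end, 0)}

rev = 7
if rev not in rewritten_entries:
    new_data = b"would be read from the old data file instead"
else:
    data_delta_base, start, end, d_comp_mode = rewritten_entries[rev]
    tmp_storage.seek(start)
    new_data = tmp_storage.read(end - start)  # same read pattern as the helper

assert new_data == b"recomputed-delta-bytes"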