##// END OF EJS Templates
censor: extract the part about recomputing delta in a function...
marmoute -
r48261:60c48458 default
parent child Browse files
Show More
@@ -1,414 +1,436 b''
1 # censor code related to censoring revision
1 # censor code related to censoring revision
2 # coding: utf8
2 # coding: utf8
3 #
3 #
4 # Copyright 2021 Pierre-Yves David <pierre-yves.david@octobus.net>
4 # Copyright 2021 Pierre-Yves David <pierre-yves.david@octobus.net>
5 # Copyright 2015 Google, Inc <martinvonz@google.com>
5 # Copyright 2015 Google, Inc <martinvonz@google.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 import contextlib
10 import contextlib
11 import os
11 import os
12
12
13 from ..node import (
13 from ..node import (
14 nullrev,
14 nullrev,
15 )
15 )
16 from .constants import (
16 from .constants import (
17 COMP_MODE_PLAIN,
17 COMP_MODE_PLAIN,
18 ENTRY_DATA_COMPRESSED_LENGTH,
18 ENTRY_DATA_COMPRESSED_LENGTH,
19 ENTRY_DATA_COMPRESSION_MODE,
19 ENTRY_DATA_COMPRESSION_MODE,
20 ENTRY_DATA_OFFSET,
20 ENTRY_DATA_OFFSET,
21 ENTRY_DATA_UNCOMPRESSED_LENGTH,
21 ENTRY_DATA_UNCOMPRESSED_LENGTH,
22 ENTRY_DELTA_BASE,
22 ENTRY_DELTA_BASE,
23 ENTRY_LINK_REV,
23 ENTRY_LINK_REV,
24 ENTRY_NODE_ID,
24 ENTRY_NODE_ID,
25 ENTRY_PARENT_1,
25 ENTRY_PARENT_1,
26 ENTRY_PARENT_2,
26 ENTRY_PARENT_2,
27 ENTRY_SIDEDATA_COMPRESSED_LENGTH,
27 ENTRY_SIDEDATA_COMPRESSED_LENGTH,
28 ENTRY_SIDEDATA_COMPRESSION_MODE,
28 ENTRY_SIDEDATA_COMPRESSION_MODE,
29 ENTRY_SIDEDATA_OFFSET,
29 ENTRY_SIDEDATA_OFFSET,
30 REVLOGV0,
30 REVLOGV0,
31 REVLOGV1,
31 REVLOGV1,
32 )
32 )
33 from ..i18n import _
33 from ..i18n import _
34
34
35 from .. import (
35 from .. import (
36 error,
36 error,
37 pycompat,
37 pycompat,
38 revlogutils,
38 revlogutils,
39 util,
39 util,
40 )
40 )
41 from ..utils import (
41 from ..utils import (
42 storageutil,
42 storageutil,
43 )
43 )
44 from . import (
44 from . import (
45 constants,
45 constants,
46 deltas,
46 deltas,
47 )
47 )
48
48
49
49
def v1_censor(rl, tr, censornode, tombstone=b''):
    """censors a revision in a "version 1" revlog

    ``rl`` is the revlog to censor, ``tr`` the current transaction,
    ``censornode`` the node of the revision to censor and ``tombstone`` the
    replacement content (packed as censor metadata below).
    """
    assert rl._format_version == constants.REVLOGV1, rl._format_version

    # avoid cycle
    from .. import revlog

    censorrev = rl.rev(censornode)
    tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

    # Rewriting the revlog in place is hard. Our strategy for censoring is
    # to create a new revlog, copy all revisions to it, then replace the
    # revlogs on transaction close.
    #
    # This is a bit dangerous. We could easily have a mismatch of state.
    newrl = revlog.revlog(
        rl.opener,
        target=rl.target,
        radix=rl.radix,
        postfix=b'tmpcensored',
        censorable=True,
    )
    # mirror the source revlog's format knobs so the copy is byte-compatible
    newrl._format_version = rl._format_version
    newrl._format_flags = rl._format_flags
    newrl._generaldelta = rl._generaldelta
    newrl._parse_index = rl._parse_index

    for rev in rl.revs():
        node = rl.node(rev)
        p1, p2 = rl.parents(node)

        if rev == censorrev:
            # store the tombstone instead of the original content
            newrl.addrawrevision(
                tombstone,
                tr,
                rl.linkrev(censorrev),
                p1,
                p2,
                censornode,
                constants.REVIDX_ISCENSORED,
            )

            # the tombstone must be stored as a full snapshot: a delta would
            # keep (part of) the censored data reachable
            if newrl.deltaparent(rev) != nullrev:
                m = _(b'censored revision stored as delta; cannot censor')
                h = _(
                    b'censoring of revlogs is not fully implemented;'
                    b' please report this bug'
                )
                raise error.Abort(m, hint=h)
            continue

        if rl.iscensored(rev):
            if rl.deltaparent(rev) != nullrev:
                m = _(
                    b'cannot censor due to censored '
                    b'revision having delta stored'
                )
                raise error.Abort(m)
            # copy the already-censored chunk verbatim (raw stored bytes)
            rawtext = rl._chunk(rev)
        else:
            rawtext = rl.rawdata(rev)

        newrl.addrawrevision(
            rawtext, tr, rl.linkrev(rev), p1, p2, node, rl.flags(rev)
        )

    # make sure the transaction can roll the original files back
    tr.addbackup(rl._indexfile, location=b'store')
    if not rl._inline:
        tr.addbackup(rl._datafile, location=b'store')

    # swap the temporary revlog in place of the original one
    rl.opener.rename(newrl._indexfile, rl._indexfile)
    if not rl._inline:
        rl.opener.rename(newrl._datafile, rl._datafile)

    rl.clearcaches()
    rl._loadindex()
126
126
127
127
def v2_censor(rl, tr, censornode, tombstone=b''):
    """censors a revision in a "version 2" revlog

    NOTE(review): ``tr`` is never used in this function's body — it appears
    to be kept for signature parity with ``v1_censor``; confirm with callers.
    """
    # General principle
    #
    # We create new revlog files (index/data/sidedata) to copy the content of
    # the existing data without the censored data.
    #
    # We need to recompute new delta for any revision that used the censored
    # revision as delta base. As the cumulative size of the new delta may be
    # large, we store them in a temporary file until they are stored in their
    # final destination.
    #
    # All data before the censored data can be blindly copied. The rest needs
    # to be copied as we go and the associated index entry needs adjustment.

    assert rl._format_version != REVLOGV0, rl._format_version
    assert rl._format_version != REVLOGV1, rl._format_version

    old_index = rl.index
    docket = rl._docket

    censor_rev = rl.rev(censornode)
    tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

    # offsets up to which the existing files can be copied verbatim
    censored_entry = rl.index[censor_rev]
    index_cutoff = rl.index.entry_size * censor_rev
    data_cutoff = censored_entry[ENTRY_DATA_OFFSET] >> 16
    sidedata_cutoff = rl.sidedata_cut_off(censor_rev)

    with pycompat.unnamedtempfile(mode=b"w+b") as tmp_storage:
        # rev → (new_base, data_start, data_end, compression_mode)
        rewritten_entries = _precompute_rewritten_delta(
            rl,
            old_index,
            {censor_rev},
            tmp_storage,
        )

        old_index_filepath = rl.opener.join(docket.index_filepath())
        old_data_filepath = rl.opener.join(docket.data_filepath())
        old_sidedata_filepath = rl.opener.join(docket.sidedata_filepath())

        new_index_filepath = rl.opener.join(docket.new_index_file())
        new_data_filepath = rl.opener.join(docket.new_data_file())
        new_sidedata_filepath = rl.opener.join(docket.new_sidedata_file())

        # blindly copy everything that precedes the censored revision
        util.copyfile(
            old_index_filepath, new_index_filepath, nb_bytes=index_cutoff
        )
        util.copyfile(
            old_data_filepath, new_data_filepath, nb_bytes=data_cutoff
        )
        util.copyfile(
            old_sidedata_filepath,
            new_sidedata_filepath,
            nb_bytes=sidedata_cutoff,
        )
        rl.opener.register_file(docket.index_filepath())
        rl.opener.register_file(docket.data_filepath())
        rl.opener.register_file(docket.sidedata_filepath())

        docket.index_end = index_cutoff
        docket.data_end = data_cutoff
        docket.sidedata_end = sidedata_cutoff

        # reload the revlog internal information
        rl.clearcaches()
        rl._loadindex(docket=docket)

        @contextlib.contextmanager
        def all_files():
            # hide opening in an helper function to please check-code, black
            # and various python versions at the same time
            with open(old_data_filepath, 'rb') as old_data_file:
                with open(old_sidedata_filepath, 'rb') as old_sidedata_file:
                    with open(new_index_filepath, 'r+b') as new_index_file:
                        with open(new_data_filepath, 'r+b') as new_data_file:
                            with open(
                                new_sidedata_filepath, 'r+b'
                            ) as new_sidedata_file:
                                yield (
                                    old_data_file,
                                    old_sidedata_file,
                                    new_index_file,
                                    new_data_file,
                                    new_sidedata_file,
                                )

        # we dont need to open the old index file since its content already
        # exist in a usable form in `old_index`.
        with all_files() as open_files:
            (
                old_data_file,
                old_sidedata_file,
                new_index_file,
                new_data_file,
                new_sidedata_file,
            ) = open_files
            # position each "new" file at its cutoff and double check the
            # copy above produced exactly that many bytes
            new_index_file.seek(0, os.SEEK_END)
            assert new_index_file.tell() == index_cutoff
            new_data_file.seek(0, os.SEEK_END)
            assert new_data_file.tell() == data_cutoff
            new_sidedata_file.seek(0, os.SEEK_END)
            assert new_sidedata_file.tell() == sidedata_cutoff

            # writing the censored revision
            _rewrite_censor(
                rl,
                old_index,
                open_files,
                censor_rev,
                tombstone,
            )

            # Writing all subsequent revisions
            for rev in range(censor_rev + 1, len(old_index)):
                _rewrite_simple(
                    rl,
                    old_index,
                    open_files,
                    rev,
                    rewritten_entries,
                    tmp_storage,
                )
            docket.write(transaction=None, stripping=True)
284
253
285
254
def _precompute_rewritten_delta(
    revlog,
    old_index,
    excluded_revs,
    tmp_storage,
):
    """Compute new delta for revisions whose delta is based on revision that
    will not survive as is.

    The recomputed (possibly compressed) delta payloads are appended to
    ``tmp_storage`` so they can be copied to their final location later.

    Return a mapping: {rev → (new_base, data_start, data_end, compression_mode)}
    """
    dc = deltas.deltacomputer(revlog)
    rewritten_entries = {}
    # nothing before the smallest excluded revision can depend on it
    first_excl_rev = min(excluded_revs)
    with revlog._segmentfile._open_read() as dfh:
        for rev in range(first_excl_rev, len(old_index)):
            if rev in excluded_revs:
                # this revision is itself being replaced, so there is no
                # delta to recompute for it here.
                continue
            entry = old_index[rev]
            if entry[ENTRY_DELTA_BASE] not in excluded_revs:
                continue
            # This is a revision that uses an excluded revision as the base
            # for its delta. We need a new delta.
            if entry[ENTRY_DATA_UNCOMPRESSED_LENGTH] == 0:
                # this revision is empty, we can delta against nullrev
                rewritten_entries[rev] = (nullrev, 0, 0, COMP_MODE_PLAIN)
            else:

                text = revlog.rawdata(rev, _df=dfh)
                info = revlogutils.revisioninfo(
                    node=entry[ENTRY_NODE_ID],
                    p1=revlog.node(entry[ENTRY_PARENT_1]),
                    p2=revlog.node(entry[ENTRY_PARENT_2]),
                    btext=[text],
                    textlen=len(text),
                    cachedelta=None,
                    flags=entry[ENTRY_DATA_OFFSET] & 0xFFFF,
                )
                # excluded_bases makes sure the new delta never points at a
                # revision that is going away
                d = dc.finddeltainfo(
                    info, dfh, excluded_bases=excluded_revs, target_rev=rev
                )
                default_comp = revlog._docket.default_compression_header
                comp_mode, d = deltas.delta_compression(default_comp, d)
                # using `tell` is a bit lazy, but we are not here for speed
                start = tmp_storage.tell()
                tmp_storage.write(d.data[1])
                end = tmp_storage.tell()
                rewritten_entries[rev] = (d.base, start, end, comp_mode)
    return rewritten_entries
306
307
def _rewrite_simple(
    revlog,
    old_index,
    all_files,
    rev,
    rewritten_entries,
    tmp_storage,
):
    """append a normal revision to the index after the rewritten one(s)

    ``all_files`` is the 5-tuple of open file objects yielded by the
    ``all_files`` helper in ``v2_censor``.  ``rewritten_entries`` maps
    rev → (new_base, data_start, data_end, compression_mode) for revisions
    whose delta had to be recomputed; their new payload lives in
    ``tmp_storage``.
    """
    (
        old_data_file,
        old_sidedata_file,
        new_index_file,
        new_data_file,
        new_sidedata_file,
    ) = all_files
    entry = old_index[rev]
    # the index packs flags in the low 16 bits of the offset field
    flags = entry[ENTRY_DATA_OFFSET] & 0xFFFF
    old_data_offset = entry[ENTRY_DATA_OFFSET] >> 16

    if rev not in rewritten_entries:
        # delta base survives: copy the stored chunk verbatim
        old_data_file.seek(old_data_offset)
        new_data_size = entry[ENTRY_DATA_COMPRESSED_LENGTH]
        new_data = old_data_file.read(new_data_size)
        data_delta_base = entry[ENTRY_DELTA_BASE]
        d_comp_mode = entry[ENTRY_DATA_COMPRESSION_MODE]
    else:
        # use the recomputed delta stashed in tmp_storage
        (
            data_delta_base,
            start,
            end,
            d_comp_mode,
        ) = rewritten_entries[rev]
        new_data_size = end - start
        tmp_storage.seek(start)
        new_data = tmp_storage.read(new_data_size)

    # It might be faster to group continuous read/write operation,
    # however, this is censor, an operation that is not focussed
    # around stellar performance. So I have not written this
    # optimisation yet.
    new_data_offset = new_data_file.tell()
    new_data_file.write(new_data)

    sidedata_size = entry[ENTRY_SIDEDATA_COMPRESSED_LENGTH]
    new_sidedata_offset = new_sidedata_file.tell()
    if 0 < sidedata_size:
        # copy the sidedata chunk unchanged
        old_sidedata_offset = entry[ENTRY_SIDEDATA_OFFSET]
        old_sidedata_file.seek(old_sidedata_offset)
        new_sidedata = old_sidedata_file.read(sidedata_size)
        new_sidedata_file.write(new_sidedata)

    data_uncompressed_length = entry[ENTRY_DATA_UNCOMPRESSED_LENGTH]
    sd_com_mode = entry[ENTRY_SIDEDATA_COMPRESSION_MODE]
    # a delta base must never point forward
    assert data_delta_base <= rev, (data_delta_base, rev)

    new_entry = revlogutils.entry(
        flags=flags,
        data_offset=new_data_offset,
        data_compressed_length=new_data_size,
        data_uncompressed_length=data_uncompressed_length,
        data_delta_base=data_delta_base,
        link_rev=entry[ENTRY_LINK_REV],
        parent_rev_1=entry[ENTRY_PARENT_1],
        parent_rev_2=entry[ENTRY_PARENT_2],
        node_id=entry[ENTRY_NODE_ID],
        sidedata_offset=new_sidedata_offset,
        sidedata_compressed_length=sidedata_size,
        data_compression_mode=d_comp_mode,
        sidedata_compression_mode=sd_com_mode,
    )
    revlog.index.append(new_entry)
    entry_bin = revlog.index.entry_binary(rev)
    new_index_file.write(entry_bin)

    # keep the docket's end markers in sync with what we just wrote
    revlog._docket.index_end = new_index_file.tell()
    revlog._docket.data_end = new_data_file.tell()
    revlog._docket.sidedata_end = new_sidedata_file.tell()
364
386
365
387
def _rewrite_censor(
    revlog,
    old_index,
    all_files,
    rev,
    tombstone,
):
    """rewrite and append a censored revision

    The tombstone replaces the original payload; the entry is written as a
    plain (uncompressed) full snapshot with the censored flag set.
    """
    # only the "new" index/data handles are actually written to here
    _unused_old_data, _unused_old_sd, new_index_file, new_data_file, _unused_new_sd = all_files
    old_entry = old_index[rev]

    # XXX consider trying the default compression too
    tombstone_size = len(tombstone)
    tombstone_offset = new_data_file.tell()
    new_data_file.write(tombstone)

    # we are not adding any sidedata as they might leak info about the censored version

    censored_entry = revlogutils.entry(
        flags=constants.REVIDX_ISCENSORED,
        data_offset=tombstone_offset,
        data_compressed_length=tombstone_size,
        data_uncompressed_length=tombstone_size,
        data_delta_base=rev,
        link_rev=old_entry[ENTRY_LINK_REV],
        parent_rev_1=old_entry[ENTRY_PARENT_1],
        parent_rev_2=old_entry[ENTRY_PARENT_2],
        node_id=old_entry[ENTRY_NODE_ID],
        sidedata_offset=0,
        sidedata_compressed_length=0,
        data_compression_mode=COMP_MODE_PLAIN,
        sidedata_compression_mode=COMP_MODE_PLAIN,
    )
    revlog.index.append(censored_entry)
    new_index_file.write(revlog.index.entry_binary(rev))
    # record the new end of the index and data files in the docket
    revlog._docket.index_end = new_index_file.tell()
    revlog._docket.data_end = new_data_file.tell()
General Comments 0
You need to be logged in to leave comments. Login now