@@ -1,359 +1,360 @@
# censor code related to censoring revision
# coding: utf8
#
# Copyright 2021 Pierre-Yves David <pierre-yves.david@octobus.net>
# Copyright 2015 Google, Inc <martinvonz@google.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

import contextlib
import os

from ..node import (
    nullrev,
)
from .constants import (
    COMP_MODE_PLAIN,
    ENTRY_DATA_COMPRESSED_LENGTH,
    ENTRY_DATA_COMPRESSION_MODE,
    ENTRY_DATA_OFFSET,
    ENTRY_DATA_UNCOMPRESSED_LENGTH,
    ENTRY_DELTA_BASE,
    ENTRY_LINK_REV,
    ENTRY_NODE_ID,
    ENTRY_PARENT_1,
    ENTRY_PARENT_2,
    ENTRY_SIDEDATA_COMPRESSED_LENGTH,
    ENTRY_SIDEDATA_COMPRESSION_MODE,
    ENTRY_SIDEDATA_OFFSET,
    REVLOGV0,
    REVLOGV1,
)
from ..i18n import _

from .. import (
    error,
    pycompat,
    revlogutils,
    util,
)
from ..utils import (
    storageutil,
)
from . import (
    constants,
    deltas,
)

def v1_censor(rl, tr, censornode, tombstone=b''):
    """censors a revision in a "version 1" revlog"""
    assert rl._format_version == constants.REVLOGV1, rl._format_version

    # avoid cycle
    from .. import revlog

    censorrev = rl.rev(censornode)
    tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

    # Rewriting the revlog in place is hard. Our strategy for censoring is
    # to create a new revlog, copy all revisions to it, then replace the
    # revlogs on transaction close.
    #
    # This is a bit dangerous. We could easily have a mismatch of state.
    newrl = revlog.revlog(
        rl.opener,
        target=rl.target,
        radix=rl.radix,
        postfix=b'tmpcensored',
        censorable=True,
    )
    newrl._format_version = rl._format_version
    newrl._format_flags = rl._format_flags
    newrl._generaldelta = rl._generaldelta
    newrl._parse_index = rl._parse_index

    for rev in rl.revs():
        node = rl.node(rev)
        p1, p2 = rl.parents(node)

        if rev == censorrev:
            newrl.addrawrevision(
                tombstone,
                tr,
                rl.linkrev(censorrev),
                p1,
                p2,
                censornode,
                constants.REVIDX_ISCENSORED,
            )

            if newrl.deltaparent(rev) != nullrev:
                m = _(b'censored revision stored as delta; cannot censor')
                h = _(
                    b'censoring of revlogs is not fully implemented;'
                    b' please report this bug'
                )
                raise error.Abort(m, hint=h)
            continue

        if rl.iscensored(rev):
            if rl.deltaparent(rev) != nullrev:
                m = _(
                    b'cannot censor due to censored '
                    b'revision having delta stored'
                )
                raise error.Abort(m)
            rawtext = rl._chunk(rev)
        else:
            rawtext = rl.rawdata(rev)

        newrl.addrawrevision(
            rawtext, tr, rl.linkrev(rev), p1, p2, node, rl.flags(rev)
        )

    tr.addbackup(rl._indexfile, location=b'store')
    if not rl._inline:
        tr.addbackup(rl._datafile, location=b'store')

    rl.opener.rename(newrl._indexfile, rl._indexfile)
    if not rl._inline:
        rl.opener.rename(newrl._datafile, rl._datafile)

    rl.clearcaches()
    rl._loadindex()

def v2_censor(rl, tr, censornode, tombstone=b''):
    """censors a revision in a "version 2" revlog"""
    # General principle
    #
    # We create new revlog files (index/data/sidedata) to copy the content of
    # the existing data without the censored data.
    #
    # We need to recompute a new delta for any revision that used the censored
    # revision as delta base. As the cumulative size of the new deltas may be
    # large, we store them in a temporary file until they are stored in their
    # final destination.
    #
    # All data before the censored data can be blindly copied. The rest needs
    # to be copied as we go and the associated index entry needs adjustment.

    assert rl._format_version != REVLOGV0, rl._format_version
    assert rl._format_version != REVLOGV1, rl._format_version

    old_index = rl.index
    docket = rl._docket

    censor_rev = rl.rev(censornode)
    tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

    censored_entry = rl.index[censor_rev]
    index_cutoff = rl.index.entry_size * censor_rev
    data_cutoff = censored_entry[ENTRY_DATA_OFFSET] >> 16
    sidedata_cutoff = rl.sidedata_cut_off(censor_rev)

    # rev → (new_base, data_start, data_end)
    rewritten_entries = {}

    dc = deltas.deltacomputer(rl)
    excl = [censor_rev]

    with pycompat.unnamedtempfile(mode=b"w+b") as tmp_storage:
        with rl._segmentfile._open_read() as dfh:
            for rev in range(censor_rev + 1, len(old_index)):
                entry = old_index[rev]
                if censor_rev != entry[ENTRY_DELTA_BASE]:
                    continue
                # This is a revision that uses the censored revision as the
                # base for its delta. We need a new delta for it.
                if entry[ENTRY_DATA_UNCOMPRESSED_LENGTH] == 0:
                    # this revision is empty, we can delta against nullrev
                    rewritten_entries[rev] = (nullrev, 0, 0)
                else:

                    text = rl.rawdata(rev, _df=dfh)
                    info = revlogutils.revisioninfo(
                        node=entry[ENTRY_NODE_ID],
                        p1=rl.node(entry[ENTRY_PARENT_1]),
                        p2=rl.node(entry[ENTRY_PARENT_2]),
                        btext=[text],
                        textlen=len(text),
                        cachedelta=None,
                        flags=entry[ENTRY_DATA_OFFSET] & 0xFFFF,
                    )
                    d = dc.finddeltainfo(
                        info, dfh, excluded_bases=excl, target_rev=rev
                    )
                    default_comp = rl._docket.default_compression_header
                    comp_mode, d = deltas.delta_compression(default_comp, d)
                    # using `tell` is a bit lazy, but we are not here for speed
                    start = tmp_storage.tell()
                    tmp_storage.write(d.data[1])
                    end = tmp_storage.tell()
                    rewritten_entries[rev] = (d.base, start, end, comp_mode)

        old_index_filepath = rl.opener.join(docket.index_filepath())
        old_data_filepath = rl.opener.join(docket.data_filepath())
        old_sidedata_filepath = rl.opener.join(docket.sidedata_filepath())

        new_index_filepath = rl.opener.join(docket.new_index_file())
        new_data_filepath = rl.opener.join(docket.new_data_file())
        new_sidedata_filepath = rl.opener.join(docket.new_sidedata_file())

        util.copyfile(
            old_index_filepath, new_index_filepath, nb_bytes=index_cutoff
        )
        util.copyfile(
            old_data_filepath, new_data_filepath, nb_bytes=data_cutoff
        )
        util.copyfile(
            old_sidedata_filepath,
            new_sidedata_filepath,
            nb_bytes=sidedata_cutoff,
        )
        rl.opener.register_file(docket.index_filepath())
        rl.opener.register_file(docket.data_filepath())
        rl.opener.register_file(docket.sidedata_filepath())

        docket.index_end = index_cutoff
        docket.data_end = data_cutoff
        docket.sidedata_end = sidedata_cutoff

        # reload the revlog internal information
        rl.clearcaches()
        rl._loadindex(docket=docket)

        @contextlib.contextmanager
        def all_files():
            # hide opening in a helper function to please check-code, black
            # and various python versions at the same time
            with open(old_data_filepath, 'rb') as old_data_file:
                with open(old_sidedata_filepath, 'rb') as old_sidedata_file:
                    with open(new_index_filepath, 'r+b') as new_index_file:
                        with open(new_data_filepath, 'r+b') as new_data_file:
                            with open(
                                new_sidedata_filepath, 'r+b'
                            ) as new_sidedata_file:
                                yield (
                                    old_data_file,
                                    old_sidedata_file,
                                    new_index_file,
                                    new_data_file,
                                    new_sidedata_file,
                                )

        # we don't need to open the old index file since its content already
        # exists in a usable form in `old_index`.
-        with all_files() as (
-            old_data_file,
-            old_sidedata_file,
-            new_index_file,
-            new_data_file,
-            new_sidedata_file,
-        ):
+        with all_files() as open_files:
+            (
+                old_data_file,
+                old_sidedata_file,
+                new_index_file,
+                new_data_file,
+                new_sidedata_file,
+            ) = open_files
            new_index_file.seek(0, os.SEEK_END)
            assert new_index_file.tell() == index_cutoff
            new_data_file.seek(0, os.SEEK_END)
            assert new_data_file.tell() == data_cutoff
            new_sidedata_file.seek(0, os.SEEK_END)
            assert new_sidedata_file.tell() == sidedata_cutoff

            ### writing the censored revision
            entry = old_index[censor_rev]

            # XXX consider trying the default compression too
            new_data_size = len(tombstone)
            new_data_offset = new_data_file.tell()
            new_data_file.write(tombstone)

            # we are not adding any sidedata as they might leak info about the censored version

            new_entry = revlogutils.entry(
                flags=constants.REVIDX_ISCENSORED,
                data_offset=new_data_offset,
                data_compressed_length=new_data_size,
                data_uncompressed_length=new_data_size,
                data_delta_base=censor_rev,
                link_rev=entry[ENTRY_LINK_REV],
                parent_rev_1=entry[ENTRY_PARENT_1],
                parent_rev_2=entry[ENTRY_PARENT_2],
                node_id=entry[ENTRY_NODE_ID],
                sidedata_offset=0,
                sidedata_compressed_length=0,
                data_compression_mode=COMP_MODE_PLAIN,
                sidedata_compression_mode=COMP_MODE_PLAIN,
            )
            rl.index.append(new_entry)
            entry_bin = rl.index.entry_binary(censor_rev)
            new_index_file.write(entry_bin)
            docket.index_end = new_index_file.tell()
            docket.data_end = new_data_file.tell()

            #### Writing all subsequent revisions
            for rev in range(censor_rev + 1, len(old_index)):
                entry = old_index[rev]
                flags = entry[ENTRY_DATA_OFFSET] & 0xFFFF
                old_data_offset = entry[ENTRY_DATA_OFFSET] >> 16

                if rev not in rewritten_entries:
                    old_data_file.seek(old_data_offset)
                    new_data_size = entry[ENTRY_DATA_COMPRESSED_LENGTH]
                    new_data = old_data_file.read(new_data_size)
                    data_delta_base = entry[ENTRY_DELTA_BASE]
                    d_comp_mode = entry[ENTRY_DATA_COMPRESSION_MODE]
                else:
                    (
                        data_delta_base,
                        start,
                        end,
                        d_comp_mode,
                    ) = rewritten_entries[rev]
                    new_data_size = end - start
                    tmp_storage.seek(start)
                    new_data = tmp_storage.read(new_data_size)

                # It might be faster to group continuous read/write operations,
                # however, this is censor, an operation that is not focussed
                # around stellar performance. So I have not written this
                # optimisation yet.
                new_data_offset = new_data_file.tell()
                new_data_file.write(new_data)

                sidedata_size = entry[ENTRY_SIDEDATA_COMPRESSED_LENGTH]
                new_sidedata_offset = new_sidedata_file.tell()
                if 0 < sidedata_size:
                    old_sidedata_offset = entry[ENTRY_SIDEDATA_OFFSET]
                    old_sidedata_file.seek(old_sidedata_offset)
                    new_sidedata = old_sidedata_file.read(sidedata_size)
                    new_sidedata_file.write(new_sidedata)

                data_uncompressed_length = entry[ENTRY_DATA_UNCOMPRESSED_LENGTH]
                sd_com_mode = entry[ENTRY_SIDEDATA_COMPRESSION_MODE]
                assert data_delta_base <= rev, (data_delta_base, rev)

                new_entry = revlogutils.entry(
                    flags=flags,
                    data_offset=new_data_offset,
                    data_compressed_length=new_data_size,
                    data_uncompressed_length=data_uncompressed_length,
                    data_delta_base=data_delta_base,
                    link_rev=entry[ENTRY_LINK_REV],
                    parent_rev_1=entry[ENTRY_PARENT_1],
                    parent_rev_2=entry[ENTRY_PARENT_2],
                    node_id=entry[ENTRY_NODE_ID],
                    sidedata_offset=new_sidedata_offset,
                    sidedata_compressed_length=sidedata_size,
                    data_compression_mode=d_comp_mode,
                    sidedata_compression_mode=sd_com_mode,
                )
                rl.index.append(new_entry)
                entry_bin = rl.index.entry_binary(rev)
                new_index_file.write(entry_bin)

            docket.index_end = new_index_file.tell()
            docket.data_end = new_data_file.tell()
            docket.sidedata_end = new_sidedata_file.tell()

    docket.write(transaction=None, stripping=True)
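
A note for reviewers less familiar with this area: the comments in v1_censor describe a copy-and-swap strategy rather than an in-place rewrite. The toy sketch below is not Mercurial's revlog API (every name in it is invented for illustration); it only shows the shape of that approach: rebuild the whole log with a tombstone in place of the censored entry, then swap the new store in once the copy is complete.

import os


def censor_by_rewrite(revisions, censor_idx, tombstone=b'censored'):
    # Copy every revision into a fresh log, substituting a tombstone for the
    # censored one. Real revlogs also carry parents, flags and deltas, which
    # is why the real code funnels everything through addrawrevision().
    return [
        tombstone if idx == censor_idx else data
        for idx, data in enumerate(revisions)
    ]


def swap_in(tmp_path, final_path):
    # The rewritten store only replaces the original after the copy has
    # finished, mirroring the rename-on-transaction-close step above.
    os.replace(tmp_path, final_path)


assert censor_by_rewrite([b'a', b'secret', b'c'], 1) == [b'a', b'censored', b'c']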
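
The "General principle" comment in v2_censor likewise compresses a lot into a few lines: data before the censored revision is copied byte for byte, while every later revision whose delta base was the censored revision must be re-encoded. The sketch below is a dict-based toy model with invented names, not the revlog index format; it only illustrates that selection step.

def plan_rewrites(index, censored_rev):
    # index maps rev -> {'base': delta base rev, 'size': uncompressed length}.
    # Revisions whose delta base is the censored revision need a new delta;
    # empty revisions can simply be re-based on the null revision, mirroring
    # the rewritten_entries bookkeeping in v2_censor.
    plan = {}
    for rev in sorted(r for r in index if r > censored_rev):
        entry = index[rev]
        if entry['base'] != censored_rev:
            continue  # untouched, will be copied as-is
        if entry['size'] == 0:
            plan[rev] = 'delta against nullrev'
        else:
            plan[rev] = 'recompute delta, spool to temporary file'
    return plan


example = {1: {'base': 0, 'size': 10}, 2: {'base': 1, 'size': 0}, 3: {'base': 1, 'size': 7}}
print(plan_rewrites(example, censored_rev=1))
# {2: 'delta against nullrev', 3: 'recompute delta, spool to temporary file'}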
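
Finally, several expressions above (entry[ENTRY_DATA_OFFSET] >> 16 and entry[ENTRY_DATA_OFFSET] & 0xFFFF) rely on the index packing the data offset and the 16-bit flags field into a single integer. A minimal round-trip of that packing, with helper names made up for this note:

def pack_offset_flags(offset, flags):
    # The offset lives in the high bits and the 16-bit flags field in the low
    # bits, matching how data_cutoff and flags are extracted above.
    assert 0 <= flags <= 0xFFFF
    return (offset << 16) | flags


def unpack_offset_flags(value):
    return value >> 16, value & 0xFFFF


packed = pack_offset_flags(4096, 0x2)
assert unpack_offset_flags(packed) == (4096, 0x2)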