##// END OF EJS Templates
issue6528: also filter delta on the fly when applying a changegroup...
marmoute -
r48629:c30ca163 stable
parent child Browse files
Show More
1 NO CONTENT: new file 100644, binary diff hidden
NO CONTENT: new file 100644, binary diff hidden
1 NO CONTENT: new file 100644, binary diff hidden
NO CONTENT: new file 100644, binary diff hidden
@@ -1,287 +1,291
1 # filelog.py - file history class for mercurial
1 # filelog.py - file history class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
3 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms of the
5 # This software may be used and distributed according to the terms of the
6 # GNU General Public License version 2 or any later version.
6 # GNU General Public License version 2 or any later version.
7
7
8 from __future__ import absolute_import
8 from __future__ import absolute_import
9
9
10 from .i18n import _
10 from .i18n import _
11 from .node import nullrev
11 from .node import nullrev
12 from . import (
12 from . import (
13 error,
13 error,
14 revlog,
14 revlog,
15 )
15 )
16 from .interfaces import (
16 from .interfaces import (
17 repository,
17 repository,
18 util as interfaceutil,
18 util as interfaceutil,
19 )
19 )
20 from .utils import storageutil
20 from .utils import storageutil
21 from .revlogutils import (
21 from .revlogutils import (
22 constants as revlog_constants,
22 constants as revlog_constants,
23 rewrite,
23 )
24 )
24
25
25
26
@interfaceutil.implementer(repository.ifilestorage)
class filelog(object):
    """Storage for the history of a single tracked file.

    A thin adapter exposing the ``repository.ifilestorage`` interface on
    top of a ``revlog.revlog`` instance stored under the ``data/`` radix.
    Most methods delegate straight to the underlying revlog.
    """

    def __init__(self, opener, path):
        self._revlog = revlog.revlog(
            opener,
            # XXX should use the unencoded path
            target=(revlog_constants.KIND_FILELOG, path),
            radix=b'/'.join((b'data', path)),
            censorable=True,
        )
        # Full name of the user visible file, relative to the repository
        # root. Used by LFS.
        self._revlog.filename = path
        self.nullid = self._revlog.nullid

    def __len__(self):
        return len(self._revlog)

    def __iter__(self):
        return self._revlog.__iter__()

    def hasnode(self, node):
        """Return True when ``node`` identifies a stored revision.

        The null node/revision is reported as absent, and any lookup
        failure (malformed identifier, unknown node) yields False.
        """
        if node == self.nullid or node == nullrev:
            return False
        try:
            self._revlog.rev(node)
        except (TypeError, ValueError, IndexError, error.LookupError):
            return False
        return True

    def revs(self, start=0, stop=None):
        return self._revlog.revs(start=start, stop=stop)

    def parents(self, node):
        return self._revlog.parents(node)

    def parentrevs(self, rev):
        return self._revlog.parentrevs(rev)

    def rev(self, node):
        return self._revlog.rev(node)

    def node(self, rev):
        return self._revlog.node(rev)

    def lookup(self, node):
        return storageutil.fileidlookup(
            self._revlog, node, self._revlog.display_id
        )

    def linkrev(self, rev):
        return self._revlog.linkrev(rev)

    def commonancestorsheads(self, node1, node2):
        return self._revlog.commonancestorsheads(node1, node2)

    # Used by dagop.blockdescendants().
    def descendants(self, revs):
        return self._revlog.descendants(revs)

    def heads(self, start=None, stop=None):
        return self._revlog.heads(start, stop)

    # Used by hgweb, children extension.
    def children(self, node):
        return self._revlog.children(node)

    def iscensored(self, rev):
        return self._revlog.iscensored(rev)

    def revision(self, node, _df=None, raw=False):
        return self._revlog.revision(node, _df=_df, raw=raw)

    def rawdata(self, node, _df=None):
        return self._revlog.rawdata(node, _df=_df)

    def emitrevisions(
        self,
        nodes,
        nodesorder=None,
        revisiondata=False,
        assumehaveparentrevisions=False,
        deltamode=repository.CG_DELTAMODE_STD,
        sidedata_helpers=None,
    ):
        return self._revlog.emitrevisions(
            nodes,
            nodesorder=nodesorder,
            revisiondata=revisiondata,
            assumehaveparentrevisions=assumehaveparentrevisions,
            deltamode=deltamode,
            sidedata_helpers=sidedata_helpers,
        )

    def addrevision(
        self,
        revisiondata,
        transaction,
        linkrev,
        p1,
        p2,
        node=None,
        flags=revlog.REVIDX_DEFAULT_FLAGS,
        cachedelta=None,
    ):
        return self._revlog.addrevision(
            revisiondata,
            transaction,
            linkrev,
            p1,
            p2,
            node=node,
            flags=flags,
            cachedelta=cachedelta,
        )

    def addgroup(
        self,
        deltas,
        linkmapper,
        transaction,
        addrevisioncb=None,
        duplicaterevisioncb=None,
        maybemissingparents=False,
    ):
        """Apply a group of deltas (e.g. from a changegroup).

        Deltas are passed through ``rewrite.filter_delta_issue6528`` on
        the fly so that revisions affected by issue6528 are repaired
        before they are stored in the underlying revlog.
        """
        if maybemissingparents:
            raise error.Abort(
                _(
                    b'revlog storage does not support missing '
                    b'parents write mode'
                )
            )

        with self._revlog._writing(transaction):

            deltas = rewrite.filter_delta_issue6528(self._revlog, deltas)

            return self._revlog.addgroup(
                deltas,
                linkmapper,
                transaction,
                addrevisioncb=addrevisioncb,
                duplicaterevisioncb=duplicaterevisioncb,
            )

    def getstrippoint(self, minlink):
        return self._revlog.getstrippoint(minlink)

    def strip(self, minlink, transaction):
        return self._revlog.strip(minlink, transaction)

    def censorrevision(self, tr, node, tombstone=b''):
        return self._revlog.censorrevision(tr, node, tombstone=tombstone)

    def files(self):
        return self._revlog.files()

    def read(self, node):
        # strip the copy/rename metadata header, returning bare file data
        return storageutil.filtermetadata(self.revision(node))

    def add(self, text, meta, transaction, link, p1=None, p2=None):
        """Add a new revision, packing copy metadata into the stored text."""
        if meta or text.startswith(b'\1\n'):
            text = storageutil.packmeta(meta, text)
        rev = self.addrevision(text, transaction, link, p1, p2)
        return self.node(rev)

    def renamed(self, node):
        return storageutil.filerevisioncopied(self, node)

    def size(self, rev):
        """return the size of a given revision"""

        # for revisions with renames, we have to go the slow way
        node = self.node(rev)
        if self.renamed(node):
            return len(self.read(node))
        if self.iscensored(rev):
            return 0

        # XXX if self.read(node).startswith("\1\n"), this returns (size+4)
        return self._revlog.size(rev)

    def cmp(self, node, text):
        """compare text with a given file revision

        returns True if text is different than what is stored.
        """
        return not storageutil.filedataequivalent(self, node, text)

    def verifyintegrity(self, state):
        return self._revlog.verifyintegrity(state)

    def storageinfo(
        self,
        exclusivefiles=False,
        sharedfiles=False,
        revisionscount=False,
        trackedsize=False,
        storedsize=False,
    ):
        return self._revlog.storageinfo(
            exclusivefiles=exclusivefiles,
            sharedfiles=sharedfiles,
            revisionscount=revisionscount,
            trackedsize=trackedsize,
            storedsize=storedsize,
        )

    # Used by repo upgrade.
    def clone(self, tr, destrevlog, **kwargs):
        if not isinstance(destrevlog, filelog):
            raise error.ProgrammingError(b'expected filelog to clone()')

        return self._revlog.clone(tr, destrevlog._revlog, **kwargs)
238
242
239
243
class narrowfilelog(filelog):
    """Filelog variation to be used with narrow stores."""

    def __init__(self, opener, path, narrowmatch):
        super(narrowfilelog, self).__init__(opener, path)
        self._narrowmatch = narrowmatch

    def renamed(self, node):
        """Like ``filelog.renamed`` but hide copies from outside the narrow
        spec.

        Renames that come from outside the narrowspec are problematic
        because we may lack the base text for the rename. This can result
        in code attempting to walk the ancestry or compute a diff
        encountering a missing revision. We address this by silently
        removing rename metadata if the source file is outside the
        narrow spec.

        A better solution would be to see if the base revision is
        available, rather than assuming it isn't.

        An even better solution would be to teach all consumers of rename
        metadata that the base revision may not be available.

        TODO consider better ways of doing this.
        """
        copy_info = super(narrowfilelog, self).renamed(node)
        if copy_info and not self._narrowmatch(copy_info[0]):
            return None
        return copy_info

    def size(self, rev):
        # Because we have a custom renamed() that may lie, we need to call
        # the base renamed() to report accurate results.
        node = self.node(rev)
        if not super(narrowfilelog, self).renamed(node):
            return super(narrowfilelog, self).size(rev)
        return len(self.read(node))

    def cmp(self, node, text):
        # We don't call `super` because narrow parents can be buggy in case
        # of a ambiguous dirstate. Always take the slow path until there is
        # a better fix, see issue6150.

        # Censored files compare against the empty file.
        if self.iscensored(self.rev(node)):
            return text != b''

        return self.read(node) != text
@@ -1,802 +1,886
1 # censor code related to censoring revision
1 # censor code related to censoring revision
2 # coding: utf8
2 # coding: utf8
3 #
3 #
4 # Copyright 2021 Pierre-Yves David <pierre-yves.david@octobus.net>
4 # Copyright 2021 Pierre-Yves David <pierre-yves.david@octobus.net>
5 # Copyright 2015 Google, Inc <martinvonz@google.com>
5 # Copyright 2015 Google, Inc <martinvonz@google.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 import binascii
10 import binascii
11 import contextlib
11 import contextlib
12 import os
12 import os
13 import struct
13 import struct
14
14
15 from ..node import (
15 from ..node import (
16 nullrev,
16 nullrev,
17 )
17 )
18 from .constants import (
18 from .constants import (
19 COMP_MODE_PLAIN,
19 COMP_MODE_PLAIN,
20 ENTRY_DATA_COMPRESSED_LENGTH,
20 ENTRY_DATA_COMPRESSED_LENGTH,
21 ENTRY_DATA_COMPRESSION_MODE,
21 ENTRY_DATA_COMPRESSION_MODE,
22 ENTRY_DATA_OFFSET,
22 ENTRY_DATA_OFFSET,
23 ENTRY_DATA_UNCOMPRESSED_LENGTH,
23 ENTRY_DATA_UNCOMPRESSED_LENGTH,
24 ENTRY_DELTA_BASE,
24 ENTRY_DELTA_BASE,
25 ENTRY_LINK_REV,
25 ENTRY_LINK_REV,
26 ENTRY_NODE_ID,
26 ENTRY_NODE_ID,
27 ENTRY_PARENT_1,
27 ENTRY_PARENT_1,
28 ENTRY_PARENT_2,
28 ENTRY_PARENT_2,
29 ENTRY_SIDEDATA_COMPRESSED_LENGTH,
29 ENTRY_SIDEDATA_COMPRESSED_LENGTH,
30 ENTRY_SIDEDATA_COMPRESSION_MODE,
30 ENTRY_SIDEDATA_COMPRESSION_MODE,
31 ENTRY_SIDEDATA_OFFSET,
31 ENTRY_SIDEDATA_OFFSET,
32 REVIDX_ISCENSORED,
32 REVLOGV0,
33 REVLOGV0,
33 REVLOGV1,
34 REVLOGV1,
34 )
35 )
35 from ..i18n import _
36 from ..i18n import _
36
37
37 from .. import (
38 from .. import (
38 error,
39 error,
40 mdiff,
39 pycompat,
41 pycompat,
40 revlogutils,
42 revlogutils,
41 util,
43 util,
42 )
44 )
43 from ..utils import (
45 from ..utils import (
44 storageutil,
46 storageutil,
45 )
47 )
46 from . import (
48 from . import (
47 constants,
49 constants,
48 deltas,
50 deltas,
49 )
51 )
50
52
51
53
def v1_censor(rl, tr, censornode, tombstone=b''):
    """censors a revision in a "version 1" revlog"""
    assert rl._format_version == constants.REVLOGV1, rl._format_version

    # avoid cycle
    from .. import revlog

    censorrev = rl.rev(censornode)
    tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

    # Rewriting the revlog in place is hard. Our strategy for censoring is
    # to create a new revlog, copy all revisions to it, then replace the
    # revlogs on transaction close.
    #
    # This is a bit dangerous. We could easily have a mismatch of state.
    rewritten = revlog.revlog(
        rl.opener,
        target=rl.target,
        radix=rl.radix,
        postfix=b'tmpcensored',
        censorable=True,
    )
    rewritten._format_version = rl._format_version
    rewritten._format_flags = rl._format_flags
    rewritten._generaldelta = rl._generaldelta
    rewritten._parse_index = rl._parse_index

    for rev in rl.revs():
        node = rl.node(rev)
        p1, p2 = rl.parents(node)

        if rev == censorrev:
            # replace the censored payload with the tombstone text
            rewritten.addrawrevision(
                tombstone,
                tr,
                rl.linkrev(censorrev),
                p1,
                p2,
                censornode,
                constants.REVIDX_ISCENSORED,
            )

            if rewritten.deltaparent(rev) != nullrev:
                msg = _(b'censored revision stored as delta; cannot censor')
                hint = _(
                    b'censoring of revlogs is not fully implemented;'
                    b' please report this bug'
                )
                raise error.Abort(msg, hint=hint)
            continue

        if rl.iscensored(rev):
            if rl.deltaparent(rev) != nullrev:
                msg = _(
                    b'cannot censor due to censored '
                    b'revision having delta stored'
                )
                raise error.Abort(msg)
            # copy the stored (censored) chunk verbatim; rawdata() would
            # fail its integrity check on a tombstone
            rawtext = rl._chunk(rev)
        else:
            rawtext = rl.rawdata(rev)

        rewritten.addrawrevision(
            rawtext, tr, rl.linkrev(rev), p1, p2, node, rl.flags(rev)
        )

    # back up the old files, then swap the rewritten ones into place
    tr.addbackup(rl._indexfile, location=b'store')
    if not rl._inline:
        tr.addbackup(rl._datafile, location=b'store')

    rl.opener.rename(rewritten._indexfile, rl._indexfile)
    if not rl._inline:
        rl.opener.rename(rewritten._datafile, rl._datafile)

    rl.clearcaches()
    rl._loadindex()
128
130
129
131
def v2_censor(revlog, tr, censornode, tombstone=b''):
    """censors a revision in a "version 2" revlog"""
    # version-2 only: v0/v1 revlogs go through v1_censor instead
    assert revlog._format_version != REVLOGV0, revlog._format_version
    assert revlog._format_version != REVLOGV1, revlog._format_version

    # delegate to the generic rewriter with a single excluded revision
    censor_revs = {revlog.rev(censornode)}
    _rewrite_v2(revlog, tr, censor_revs, tombstone)
137
139
138
140
def _rewrite_v2(revlog, tr, censor_revs, tombstone=b''):
    """rewrite a revlog to censor some of its content

    General principle

    We create new revlog files (index/data/sidedata) to copy the content of
    the existing data without the censored data.

    We need to recompute new delta for any revision that used the censored
    revision as delta base. As the cumulative size of the new delta may be
    large, we store them in a temporary file until they are stored in their
    final destination.

    All data before the censored data can be blindly copied. The rest needs
    to be copied as we go and the associated index entry needs adjustement.
    """
    assert revlog._format_version != REVLOGV0, revlog._format_version
    assert revlog._format_version != REVLOGV1, revlog._format_version

    old_index = revlog.index
    docket = revlog._docket

    tombstone = storageutil.packmeta({b'censored': tombstone}, b'')

    # everything strictly before the first censored revision is reusable
    first_excl_rev = min(censor_revs)

    first_excl_entry = revlog.index[first_excl_rev]
    index_cutoff = revlog.index.entry_size * first_excl_rev
    data_cutoff = first_excl_entry[ENTRY_DATA_OFFSET] >> 16
    sidedata_cutoff = revlog.sidedata_cut_off(first_excl_rev)

    with pycompat.unnamedtempfile(mode=b"w+b") as tmp_storage:
        # rev → (new_base, data_start, data_end, compression_mode)
        rewritten_entries = _precompute_rewritten_delta(
            revlog,
            old_index,
            censor_revs,
            tmp_storage,
        )

        all_files = _setup_new_files(
            revlog,
            index_cutoff,
            data_cutoff,
            sidedata_cutoff,
        )

        # we dont need to open the old index file since its content already
        # exist in a usable form in `old_index`.
        with all_files() as open_files:
            (
                old_data_file,
                old_sidedata_file,
                new_index_file,
                new_data_file,
                new_sidedata_file,
            ) = open_files

            # writing the censored revision

            # Writing all subsequent revisions
            for rev in range(first_excl_rev, len(old_index)):
                if rev in censor_revs:
                    _rewrite_censor(
                        revlog,
                        old_index,
                        open_files,
                        rev,
                        tombstone,
                    )
                else:
                    _rewrite_simple(
                        revlog,
                        old_index,
                        open_files,
                        rev,
                        rewritten_entries,
                        tmp_storage,
                    )
            docket.write(transaction=None, stripping=True)
219
221
220
222
def _precompute_rewritten_delta(
    revlog,
    old_index,
    excluded_revs,
    tmp_storage,
):
    """Compute new delta for revisions whose delta is based on revision that
    will not survive as is.

    Return a mapping: {rev → (new_base, data_start, data_end, compression_mode)}
    """
    delta_computer = deltas.deltacomputer(revlog)
    rewritten_entries = {}
    first_excl_rev = min(excluded_revs)
    with revlog._segmentfile._open_read() as dfh:
        for rev in range(first_excl_rev, len(old_index)):
            if rev in excluded_revs:
                # this revision will be preserved as is, so we don't need to
                # consider recomputing a delta.
                continue
            entry = old_index[rev]
            if entry[ENTRY_DELTA_BASE] not in excluded_revs:
                continue
            # This revision uses a censored revision as its delta base, so
            # it needs a new delta.
            if entry[ENTRY_DATA_UNCOMPRESSED_LENGTH] == 0:
                # this revision is empty, we can delta against nullrev
                rewritten_entries[rev] = (nullrev, 0, 0, COMP_MODE_PLAIN)
                continue

            text = revlog.rawdata(rev, _df=dfh)
            info = revlogutils.revisioninfo(
                node=entry[ENTRY_NODE_ID],
                p1=revlog.node(entry[ENTRY_PARENT_1]),
                p2=revlog.node(entry[ENTRY_PARENT_2]),
                btext=[text],
                textlen=len(text),
                cachedelta=None,
                flags=entry[ENTRY_DATA_OFFSET] & 0xFFFF,
            )
            new_delta = delta_computer.finddeltainfo(
                info, dfh, excluded_bases=excluded_revs, target_rev=rev
            )
            default_comp = revlog._docket.default_compression_header
            comp_mode, new_delta = deltas.delta_compression(
                default_comp, new_delta
            )
            # using `tell` is a bit lazy, but we are not here for speed
            start = tmp_storage.tell()
            tmp_storage.write(new_delta.data[1])
            end = tmp_storage.tell()
            rewritten_entries[rev] = (new_delta.base, start, end, comp_mode)
    return rewritten_entries
272
274
273
275
274 def _setup_new_files(
276 def _setup_new_files(
275 revlog,
277 revlog,
276 index_cutoff,
278 index_cutoff,
277 data_cutoff,
279 data_cutoff,
278 sidedata_cutoff,
280 sidedata_cutoff,
279 ):
281 ):
280 """
282 """
281
283
282 return a context manager to open all the relevant files:
284 return a context manager to open all the relevant files:
283 - old_data_file,
285 - old_data_file,
284 - old_sidedata_file,
286 - old_sidedata_file,
285 - new_index_file,
287 - new_index_file,
286 - new_data_file,
288 - new_data_file,
287 - new_sidedata_file,
289 - new_sidedata_file,
288
290
289 The old_index_file is not here because it is accessed through the
291 The old_index_file is not here because it is accessed through the
290 `old_index` object if the caller function.
292 `old_index` object if the caller function.
291 """
293 """
292 docket = revlog._docket
294 docket = revlog._docket
293 old_index_filepath = revlog.opener.join(docket.index_filepath())
295 old_index_filepath = revlog.opener.join(docket.index_filepath())
294 old_data_filepath = revlog.opener.join(docket.data_filepath())
296 old_data_filepath = revlog.opener.join(docket.data_filepath())
295 old_sidedata_filepath = revlog.opener.join(docket.sidedata_filepath())
297 old_sidedata_filepath = revlog.opener.join(docket.sidedata_filepath())
296
298
297 new_index_filepath = revlog.opener.join(docket.new_index_file())
299 new_index_filepath = revlog.opener.join(docket.new_index_file())
298 new_data_filepath = revlog.opener.join(docket.new_data_file())
300 new_data_filepath = revlog.opener.join(docket.new_data_file())
299 new_sidedata_filepath = revlog.opener.join(docket.new_sidedata_file())
301 new_sidedata_filepath = revlog.opener.join(docket.new_sidedata_file())
300
302
301 util.copyfile(old_index_filepath, new_index_filepath, nb_bytes=index_cutoff)
303 util.copyfile(old_index_filepath, new_index_filepath, nb_bytes=index_cutoff)
302 util.copyfile(old_data_filepath, new_data_filepath, nb_bytes=data_cutoff)
304 util.copyfile(old_data_filepath, new_data_filepath, nb_bytes=data_cutoff)
303 util.copyfile(
305 util.copyfile(
304 old_sidedata_filepath,
306 old_sidedata_filepath,
305 new_sidedata_filepath,
307 new_sidedata_filepath,
306 nb_bytes=sidedata_cutoff,
308 nb_bytes=sidedata_cutoff,
307 )
309 )
308 revlog.opener.register_file(docket.index_filepath())
310 revlog.opener.register_file(docket.index_filepath())
309 revlog.opener.register_file(docket.data_filepath())
311 revlog.opener.register_file(docket.data_filepath())
310 revlog.opener.register_file(docket.sidedata_filepath())
312 revlog.opener.register_file(docket.sidedata_filepath())
311
313
312 docket.index_end = index_cutoff
314 docket.index_end = index_cutoff
313 docket.data_end = data_cutoff
315 docket.data_end = data_cutoff
314 docket.sidedata_end = sidedata_cutoff
316 docket.sidedata_end = sidedata_cutoff
315
317
316 # reload the revlog internal information
318 # reload the revlog internal information
317 revlog.clearcaches()
319 revlog.clearcaches()
318 revlog._loadindex(docket=docket)
320 revlog._loadindex(docket=docket)
319
321
320 @contextlib.contextmanager
322 @contextlib.contextmanager
321 def all_files_opener():
323 def all_files_opener():
322 # hide opening in an helper function to please check-code, black
324 # hide opening in an helper function to please check-code, black
323 # and various python version at the same time
325 # and various python version at the same time
324 with open(old_data_filepath, 'rb') as old_data_file:
326 with open(old_data_filepath, 'rb') as old_data_file:
325 with open(old_sidedata_filepath, 'rb') as old_sidedata_file:
327 with open(old_sidedata_filepath, 'rb') as old_sidedata_file:
326 with open(new_index_filepath, 'r+b') as new_index_file:
328 with open(new_index_filepath, 'r+b') as new_index_file:
327 with open(new_data_filepath, 'r+b') as new_data_file:
329 with open(new_data_filepath, 'r+b') as new_data_file:
328 with open(
330 with open(
329 new_sidedata_filepath, 'r+b'
331 new_sidedata_filepath, 'r+b'
330 ) as new_sidedata_file:
332 ) as new_sidedata_file:
331 new_index_file.seek(0, os.SEEK_END)
333 new_index_file.seek(0, os.SEEK_END)
332 assert new_index_file.tell() == index_cutoff
334 assert new_index_file.tell() == index_cutoff
333 new_data_file.seek(0, os.SEEK_END)
335 new_data_file.seek(0, os.SEEK_END)
334 assert new_data_file.tell() == data_cutoff
336 assert new_data_file.tell() == data_cutoff
335 new_sidedata_file.seek(0, os.SEEK_END)
337 new_sidedata_file.seek(0, os.SEEK_END)
336 assert new_sidedata_file.tell() == sidedata_cutoff
338 assert new_sidedata_file.tell() == sidedata_cutoff
337 yield (
339 yield (
338 old_data_file,
340 old_data_file,
339 old_sidedata_file,
341 old_sidedata_file,
340 new_index_file,
342 new_index_file,
341 new_data_file,
343 new_data_file,
342 new_sidedata_file,
344 new_sidedata_file,
343 )
345 )
344
346
345 return all_files_opener
347 return all_files_opener
346
348
347
349
348 def _rewrite_simple(
350 def _rewrite_simple(
349 revlog,
351 revlog,
350 old_index,
352 old_index,
351 all_files,
353 all_files,
352 rev,
354 rev,
353 rewritten_entries,
355 rewritten_entries,
354 tmp_storage,
356 tmp_storage,
355 ):
357 ):
356 """append a normal revision to the index after the rewritten one(s)"""
358 """append a normal revision to the index after the rewritten one(s)"""
357 (
359 (
358 old_data_file,
360 old_data_file,
359 old_sidedata_file,
361 old_sidedata_file,
360 new_index_file,
362 new_index_file,
361 new_data_file,
363 new_data_file,
362 new_sidedata_file,
364 new_sidedata_file,
363 ) = all_files
365 ) = all_files
364 entry = old_index[rev]
366 entry = old_index[rev]
365 flags = entry[ENTRY_DATA_OFFSET] & 0xFFFF
367 flags = entry[ENTRY_DATA_OFFSET] & 0xFFFF
366 old_data_offset = entry[ENTRY_DATA_OFFSET] >> 16
368 old_data_offset = entry[ENTRY_DATA_OFFSET] >> 16
367
369
368 if rev not in rewritten_entries:
370 if rev not in rewritten_entries:
369 old_data_file.seek(old_data_offset)
371 old_data_file.seek(old_data_offset)
370 new_data_size = entry[ENTRY_DATA_COMPRESSED_LENGTH]
372 new_data_size = entry[ENTRY_DATA_COMPRESSED_LENGTH]
371 new_data = old_data_file.read(new_data_size)
373 new_data = old_data_file.read(new_data_size)
372 data_delta_base = entry[ENTRY_DELTA_BASE]
374 data_delta_base = entry[ENTRY_DELTA_BASE]
373 d_comp_mode = entry[ENTRY_DATA_COMPRESSION_MODE]
375 d_comp_mode = entry[ENTRY_DATA_COMPRESSION_MODE]
374 else:
376 else:
375 (
377 (
376 data_delta_base,
378 data_delta_base,
377 start,
379 start,
378 end,
380 end,
379 d_comp_mode,
381 d_comp_mode,
380 ) = rewritten_entries[rev]
382 ) = rewritten_entries[rev]
381 new_data_size = end - start
383 new_data_size = end - start
382 tmp_storage.seek(start)
384 tmp_storage.seek(start)
383 new_data = tmp_storage.read(new_data_size)
385 new_data = tmp_storage.read(new_data_size)
384
386
385 # It might be faster to group continuous read/write operation,
387 # It might be faster to group continuous read/write operation,
386 # however, this is censor, an operation that is not focussed
388 # however, this is censor, an operation that is not focussed
387 # around stellar performance. So I have not written this
389 # around stellar performance. So I have not written this
388 # optimisation yet.
390 # optimisation yet.
389 new_data_offset = new_data_file.tell()
391 new_data_offset = new_data_file.tell()
390 new_data_file.write(new_data)
392 new_data_file.write(new_data)
391
393
392 sidedata_size = entry[ENTRY_SIDEDATA_COMPRESSED_LENGTH]
394 sidedata_size = entry[ENTRY_SIDEDATA_COMPRESSED_LENGTH]
393 new_sidedata_offset = new_sidedata_file.tell()
395 new_sidedata_offset = new_sidedata_file.tell()
394 if 0 < sidedata_size:
396 if 0 < sidedata_size:
395 old_sidedata_offset = entry[ENTRY_SIDEDATA_OFFSET]
397 old_sidedata_offset = entry[ENTRY_SIDEDATA_OFFSET]
396 old_sidedata_file.seek(old_sidedata_offset)
398 old_sidedata_file.seek(old_sidedata_offset)
397 new_sidedata = old_sidedata_file.read(sidedata_size)
399 new_sidedata = old_sidedata_file.read(sidedata_size)
398 new_sidedata_file.write(new_sidedata)
400 new_sidedata_file.write(new_sidedata)
399
401
400 data_uncompressed_length = entry[ENTRY_DATA_UNCOMPRESSED_LENGTH]
402 data_uncompressed_length = entry[ENTRY_DATA_UNCOMPRESSED_LENGTH]
401 sd_com_mode = entry[ENTRY_SIDEDATA_COMPRESSION_MODE]
403 sd_com_mode = entry[ENTRY_SIDEDATA_COMPRESSION_MODE]
402 assert data_delta_base <= rev, (data_delta_base, rev)
404 assert data_delta_base <= rev, (data_delta_base, rev)
403
405
404 new_entry = revlogutils.entry(
406 new_entry = revlogutils.entry(
405 flags=flags,
407 flags=flags,
406 data_offset=new_data_offset,
408 data_offset=new_data_offset,
407 data_compressed_length=new_data_size,
409 data_compressed_length=new_data_size,
408 data_uncompressed_length=data_uncompressed_length,
410 data_uncompressed_length=data_uncompressed_length,
409 data_delta_base=data_delta_base,
411 data_delta_base=data_delta_base,
410 link_rev=entry[ENTRY_LINK_REV],
412 link_rev=entry[ENTRY_LINK_REV],
411 parent_rev_1=entry[ENTRY_PARENT_1],
413 parent_rev_1=entry[ENTRY_PARENT_1],
412 parent_rev_2=entry[ENTRY_PARENT_2],
414 parent_rev_2=entry[ENTRY_PARENT_2],
413 node_id=entry[ENTRY_NODE_ID],
415 node_id=entry[ENTRY_NODE_ID],
414 sidedata_offset=new_sidedata_offset,
416 sidedata_offset=new_sidedata_offset,
415 sidedata_compressed_length=sidedata_size,
417 sidedata_compressed_length=sidedata_size,
416 data_compression_mode=d_comp_mode,
418 data_compression_mode=d_comp_mode,
417 sidedata_compression_mode=sd_com_mode,
419 sidedata_compression_mode=sd_com_mode,
418 )
420 )
419 revlog.index.append(new_entry)
421 revlog.index.append(new_entry)
420 entry_bin = revlog.index.entry_binary(rev)
422 entry_bin = revlog.index.entry_binary(rev)
421 new_index_file.write(entry_bin)
423 new_index_file.write(entry_bin)
422
424
423 revlog._docket.index_end = new_index_file.tell()
425 revlog._docket.index_end = new_index_file.tell()
424 revlog._docket.data_end = new_data_file.tell()
426 revlog._docket.data_end = new_data_file.tell()
425 revlog._docket.sidedata_end = new_sidedata_file.tell()
427 revlog._docket.sidedata_end = new_sidedata_file.tell()
426
428
427
429
428 def _rewrite_censor(
430 def _rewrite_censor(
429 revlog,
431 revlog,
430 old_index,
432 old_index,
431 all_files,
433 all_files,
432 rev,
434 rev,
433 tombstone,
435 tombstone,
434 ):
436 ):
435 """rewrite and append a censored revision"""
437 """rewrite and append a censored revision"""
436 (
438 (
437 old_data_file,
439 old_data_file,
438 old_sidedata_file,
440 old_sidedata_file,
439 new_index_file,
441 new_index_file,
440 new_data_file,
442 new_data_file,
441 new_sidedata_file,
443 new_sidedata_file,
442 ) = all_files
444 ) = all_files
443 entry = old_index[rev]
445 entry = old_index[rev]
444
446
445 # XXX consider trying the default compression too
447 # XXX consider trying the default compression too
446 new_data_size = len(tombstone)
448 new_data_size = len(tombstone)
447 new_data_offset = new_data_file.tell()
449 new_data_offset = new_data_file.tell()
448 new_data_file.write(tombstone)
450 new_data_file.write(tombstone)
449
451
450 # we are not adding any sidedata as they might leak info about the censored version
452 # we are not adding any sidedata as they might leak info about the censored version
451
453
452 link_rev = entry[ENTRY_LINK_REV]
454 link_rev = entry[ENTRY_LINK_REV]
453
455
454 p1 = entry[ENTRY_PARENT_1]
456 p1 = entry[ENTRY_PARENT_1]
455 p2 = entry[ENTRY_PARENT_2]
457 p2 = entry[ENTRY_PARENT_2]
456
458
457 new_entry = revlogutils.entry(
459 new_entry = revlogutils.entry(
458 flags=constants.REVIDX_ISCENSORED,
460 flags=constants.REVIDX_ISCENSORED,
459 data_offset=new_data_offset,
461 data_offset=new_data_offset,
460 data_compressed_length=new_data_size,
462 data_compressed_length=new_data_size,
461 data_uncompressed_length=new_data_size,
463 data_uncompressed_length=new_data_size,
462 data_delta_base=rev,
464 data_delta_base=rev,
463 link_rev=link_rev,
465 link_rev=link_rev,
464 parent_rev_1=p1,
466 parent_rev_1=p1,
465 parent_rev_2=p2,
467 parent_rev_2=p2,
466 node_id=entry[ENTRY_NODE_ID],
468 node_id=entry[ENTRY_NODE_ID],
467 sidedata_offset=0,
469 sidedata_offset=0,
468 sidedata_compressed_length=0,
470 sidedata_compressed_length=0,
469 data_compression_mode=COMP_MODE_PLAIN,
471 data_compression_mode=COMP_MODE_PLAIN,
470 sidedata_compression_mode=COMP_MODE_PLAIN,
472 sidedata_compression_mode=COMP_MODE_PLAIN,
471 )
473 )
472 revlog.index.append(new_entry)
474 revlog.index.append(new_entry)
473 entry_bin = revlog.index.entry_binary(rev)
475 entry_bin = revlog.index.entry_binary(rev)
474 new_index_file.write(entry_bin)
476 new_index_file.write(entry_bin)
475 revlog._docket.index_end = new_index_file.tell()
477 revlog._docket.index_end = new_index_file.tell()
476 revlog._docket.data_end = new_data_file.tell()
478 revlog._docket.data_end = new_data_file.tell()
477
479
478
480
479 def _get_filename_from_filelog_index(path):
481 def _get_filename_from_filelog_index(path):
480 # Drop the extension and the `data/` prefix
482 # Drop the extension and the `data/` prefix
481 path_part = path.rsplit(b'.', 1)[0].split(b'/', 1)
483 path_part = path.rsplit(b'.', 1)[0].split(b'/', 1)
482 if len(path_part) < 2:
484 if len(path_part) < 2:
483 msg = _(b"cannot recognize filelog from filename: '%s'")
485 msg = _(b"cannot recognize filelog from filename: '%s'")
484 msg %= path
486 msg %= path
485 raise error.Abort(msg)
487 raise error.Abort(msg)
486
488
487 return path_part[1]
489 return path_part[1]
488
490
489
491
490 def _filelog_from_filename(repo, path):
492 def _filelog_from_filename(repo, path):
491 """Returns the filelog for the given `path`. Stolen from `engine.py`"""
493 """Returns the filelog for the given `path`. Stolen from `engine.py`"""
492
494
493 from .. import filelog # avoid cycle
495 from .. import filelog # avoid cycle
494
496
495 fl = filelog.filelog(repo.svfs, path)
497 fl = filelog.filelog(repo.svfs, path)
496 return fl
498 return fl
497
499
498
500
499 def _write_swapped_parents(repo, rl, rev, offset, fp):
501 def _write_swapped_parents(repo, rl, rev, offset, fp):
500 """Swaps p1 and p2 and overwrites the revlog entry for `rev` in `fp`"""
502 """Swaps p1 and p2 and overwrites the revlog entry for `rev` in `fp`"""
501 from ..pure import parsers # avoid cycle
503 from ..pure import parsers # avoid cycle
502
504
503 if repo._currentlock(repo._lockref) is None:
505 if repo._currentlock(repo._lockref) is None:
504 # Let's be paranoid about it
506 # Let's be paranoid about it
505 msg = "repo needs to be locked to rewrite parents"
507 msg = "repo needs to be locked to rewrite parents"
506 raise error.ProgrammingError(msg)
508 raise error.ProgrammingError(msg)
507
509
508 index_format = parsers.IndexObject.index_format
510 index_format = parsers.IndexObject.index_format
509 entry = rl.index[rev]
511 entry = rl.index[rev]
510 new_entry = list(entry)
512 new_entry = list(entry)
511 new_entry[5], new_entry[6] = entry[6], entry[5]
513 new_entry[5], new_entry[6] = entry[6], entry[5]
512 packed = index_format.pack(*new_entry[:8])
514 packed = index_format.pack(*new_entry[:8])
513 fp.seek(offset)
515 fp.seek(offset)
514 fp.write(packed)
516 fp.write(packed)
515
517
516
518
517 def _reorder_filelog_parents(repo, fl, to_fix):
519 def _reorder_filelog_parents(repo, fl, to_fix):
518 """
520 """
519 Swaps p1 and p2 for all `to_fix` revisions of filelog `fl` and writes the
521 Swaps p1 and p2 for all `to_fix` revisions of filelog `fl` and writes the
520 new version to disk, overwriting the old one with a rename.
522 new version to disk, overwriting the old one with a rename.
521 """
523 """
522 from ..pure import parsers # avoid cycle
524 from ..pure import parsers # avoid cycle
523
525
524 ui = repo.ui
526 ui = repo.ui
525 assert len(to_fix) > 0
527 assert len(to_fix) > 0
526 rl = fl._revlog
528 rl = fl._revlog
527 if rl._format_version != constants.REVLOGV1:
529 if rl._format_version != constants.REVLOGV1:
528 msg = "expected version 1 revlog, got version '%d'" % rl._format_version
530 msg = "expected version 1 revlog, got version '%d'" % rl._format_version
529 raise error.ProgrammingError(msg)
531 raise error.ProgrammingError(msg)
530
532
531 index_file = rl._indexfile
533 index_file = rl._indexfile
532 new_file_path = index_file + b'.tmp-parents-fix'
534 new_file_path = index_file + b'.tmp-parents-fix'
533 repaired_msg = _(b"repaired revision %d of 'filelog %s'\n")
535 repaired_msg = _(b"repaired revision %d of 'filelog %s'\n")
534
536
535 with ui.uninterruptible():
537 with ui.uninterruptible():
536 try:
538 try:
537 util.copyfile(
539 util.copyfile(
538 rl.opener.join(index_file),
540 rl.opener.join(index_file),
539 rl.opener.join(new_file_path),
541 rl.opener.join(new_file_path),
540 checkambig=rl._checkambig,
542 checkambig=rl._checkambig,
541 )
543 )
542
544
543 with rl.opener(new_file_path, mode=b"r+") as fp:
545 with rl.opener(new_file_path, mode=b"r+") as fp:
544 if rl._inline:
546 if rl._inline:
545 index = parsers.InlinedIndexObject(fp.read())
547 index = parsers.InlinedIndexObject(fp.read())
546 for rev in fl.revs():
548 for rev in fl.revs():
547 if rev in to_fix:
549 if rev in to_fix:
548 offset = index._calculate_index(rev)
550 offset = index._calculate_index(rev)
549 _write_swapped_parents(repo, rl, rev, offset, fp)
551 _write_swapped_parents(repo, rl, rev, offset, fp)
550 ui.write(repaired_msg % (rev, index_file))
552 ui.write(repaired_msg % (rev, index_file))
551 else:
553 else:
552 index_format = parsers.IndexObject.index_format
554 index_format = parsers.IndexObject.index_format
553 for rev in to_fix:
555 for rev in to_fix:
554 offset = rev * index_format.size
556 offset = rev * index_format.size
555 _write_swapped_parents(repo, rl, rev, offset, fp)
557 _write_swapped_parents(repo, rl, rev, offset, fp)
556 ui.write(repaired_msg % (rev, index_file))
558 ui.write(repaired_msg % (rev, index_file))
557
559
558 rl.opener.rename(new_file_path, index_file)
560 rl.opener.rename(new_file_path, index_file)
559 rl.clearcaches()
561 rl.clearcaches()
560 rl._loadindex()
562 rl._loadindex()
561 finally:
563 finally:
562 util.tryunlink(new_file_path)
564 util.tryunlink(new_file_path)
563
565
564
566
565 def _is_revision_affected(fl, filerev, metadata_cache=None):
567 def _is_revision_affected(fl, filerev, metadata_cache=None):
566 full_text = lambda: fl._revlog.rawdata(filerev)
568 full_text = lambda: fl._revlog.rawdata(filerev)
567 parent_revs = lambda: fl._revlog.parentrevs(filerev)
569 parent_revs = lambda: fl._revlog.parentrevs(filerev)
568 return _is_revision_affected_inner(
570 return _is_revision_affected_inner(
569 full_text, parent_revs, filerev, metadata_cache
571 full_text, parent_revs, filerev, metadata_cache
570 )
572 )
571
573
572
574
573 def _is_revision_affected_inner(
575 def _is_revision_affected_inner(
574 full_text,
576 full_text,
575 parents_revs,
577 parents_revs,
576 filerev,
578 filerev,
577 metadata_cache=None,
579 metadata_cache=None,
578 ):
580 ):
579 """Mercurial currently (5.9rc0) uses `p1 == nullrev and p2 != nullrev` as a
581 """Mercurial currently (5.9rc0) uses `p1 == nullrev and p2 != nullrev` as a
580 special meaning compared to the reverse in the context of filelog-based
582 special meaning compared to the reverse in the context of filelog-based
581 copytracing. issue6528 exists because new code assumed that parent ordering
583 copytracing. issue6528 exists because new code assumed that parent ordering
582 didn't matter, so this detects if the revision contains metadata (since
584 didn't matter, so this detects if the revision contains metadata (since
583 it's only used for filelog-based copytracing) and its parents are in the
585 it's only used for filelog-based copytracing) and its parents are in the
584 "wrong" order."""
586 "wrong" order."""
585 try:
587 try:
586 raw_text = full_text()
588 raw_text = full_text()
587 except error.CensoredNodeError:
589 except error.CensoredNodeError:
588 # We don't care about censored nodes as they never carry metadata
590 # We don't care about censored nodes as they never carry metadata
589 return False
591 return False
590 has_meta = raw_text.startswith(b'\x01\n')
592 has_meta = raw_text.startswith(b'\x01\n')
591 if metadata_cache is not None:
593 if metadata_cache is not None:
592 metadata_cache[filerev] = has_meta
594 metadata_cache[filerev] = has_meta
593 if has_meta:
595 if has_meta:
594 (p1, p2) = parents_revs()
596 (p1, p2) = parents_revs()
595 if p1 != nullrev and p2 == nullrev:
597 if p1 != nullrev and p2 == nullrev:
596 return True
598 return True
597 return False
599 return False
598
600
599
601
600 def _is_revision_affected_fast(repo, fl, filerev, metadata_cache):
602 def _is_revision_affected_fast(repo, fl, filerev, metadata_cache):
601 rl = fl._revlog
603 rl = fl._revlog
602 is_censored = lambda: rl.iscensored(filerev)
604 is_censored = lambda: rl.iscensored(filerev)
603 delta_base = lambda: rl.deltaparent(filerev)
605 delta_base = lambda: rl.deltaparent(filerev)
604 delta = lambda: rl._chunk(filerev)
606 delta = lambda: rl._chunk(filerev)
605 full_text = lambda: rl.rawdata(filerev)
607 full_text = lambda: rl.rawdata(filerev)
606 parent_revs = lambda: rl.parentrevs(filerev)
608 parent_revs = lambda: rl.parentrevs(filerev)
607 return _is_revision_affected_fast_inner(
609 return _is_revision_affected_fast_inner(
608 is_censored,
610 is_censored,
609 delta_base,
611 delta_base,
610 delta,
612 delta,
611 full_text,
613 full_text,
612 parent_revs,
614 parent_revs,
613 filerev,
615 filerev,
614 metadata_cache,
616 metadata_cache,
615 )
617 )
616
618
617
619
618 def _is_revision_affected_fast_inner(
620 def _is_revision_affected_fast_inner(
619 is_censored,
621 is_censored,
620 delta_base,
622 delta_base,
621 delta,
623 delta,
622 full_text,
624 full_text,
623 parent_revs,
625 parent_revs,
624 filerev,
626 filerev,
625 metadata_cache,
627 metadata_cache,
626 ):
628 ):
627 """Optimization fast-path for `_is_revision_affected`.
629 """Optimization fast-path for `_is_revision_affected`.
628
630
629 `metadata_cache` is a dict of `{rev: has_metadata}` which allows any
631 `metadata_cache` is a dict of `{rev: has_metadata}` which allows any
630 revision to check if its base has metadata, saving computation of the full
632 revision to check if its base has metadata, saving computation of the full
631 text, instead looking at the current delta.
633 text, instead looking at the current delta.
632
634
633 This optimization only works if the revisions are looked at in order."""
635 This optimization only works if the revisions are looked at in order."""
634
636
635 if is_censored():
637 if is_censored():
636 # Censored revisions don't contain metadata, so they cannot be affected
638 # Censored revisions don't contain metadata, so they cannot be affected
637 metadata_cache[filerev] = False
639 metadata_cache[filerev] = False
638 return False
640 return False
639
641
640 p1, p2 = parent_revs()
642 p1, p2 = parent_revs()
641 if p1 == nullrev or p2 != nullrev:
643 if p1 == nullrev or p2 != nullrev:
642 return False
644 return False
643
645
644 delta_parent = delta_base()
646 delta_parent = delta_base()
645 parent_has_metadata = metadata_cache.get(delta_parent)
647 parent_has_metadata = metadata_cache.get(delta_parent)
646 if parent_has_metadata is None:
648 if parent_has_metadata is None:
647 return _is_revision_affected_inner(
649 return _is_revision_affected_inner(
648 full_text,
650 full_text,
649 parent_revs,
651 parent_revs,
650 filerev,
652 filerev,
651 metadata_cache,
653 metadata_cache,
652 )
654 )
653
655
654 chunk = delta()
656 chunk = delta()
655 if not len(chunk):
657 if not len(chunk):
656 # No diff for this revision
658 # No diff for this revision
657 return parent_has_metadata
659 return parent_has_metadata
658
660
659 header_length = 12
661 header_length = 12
660 if len(chunk) < header_length:
662 if len(chunk) < header_length:
661 raise error.Abort(_(b"patch cannot be decoded"))
663 raise error.Abort(_(b"patch cannot be decoded"))
662
664
663 start, _end, _length = struct.unpack(b">lll", chunk[:header_length])
665 start, _end, _length = struct.unpack(b">lll", chunk[:header_length])
664
666
665 if start < 2: # len(b'\x01\n') == 2
667 if start < 2: # len(b'\x01\n') == 2
666 # This delta does *something* to the metadata marker (if any).
668 # This delta does *something* to the metadata marker (if any).
667 # Check it the slow way
669 # Check it the slow way
668 is_affected = _is_revision_affected_inner(
670 is_affected = _is_revision_affected_inner(
669 full_text,
671 full_text,
670 parent_revs,
672 parent_revs,
671 filerev,
673 filerev,
672 metadata_cache,
674 metadata_cache,
673 )
675 )
674 return is_affected
676 return is_affected
675
677
676 # The diff did not remove or add the metadata header, it's then in the same
678 # The diff did not remove or add the metadata header, it's then in the same
677 # situation as its parent
679 # situation as its parent
678 metadata_cache[filerev] = parent_has_metadata
680 metadata_cache[filerev] = parent_has_metadata
679 return parent_has_metadata
681 return parent_has_metadata
680
682
681
683
682 def _from_report(ui, repo, context, from_report, dry_run):
684 def _from_report(ui, repo, context, from_report, dry_run):
683 """
685 """
684 Fix the revisions given in the `from_report` file, but still checks if the
686 Fix the revisions given in the `from_report` file, but still checks if the
685 revisions are indeed affected to prevent an unfortunate cyclic situation
687 revisions are indeed affected to prevent an unfortunate cyclic situation
686 where we'd swap well-ordered parents again.
688 where we'd swap well-ordered parents again.
687
689
688 See the doc for `debug_fix_issue6528` for the format documentation.
690 See the doc for `debug_fix_issue6528` for the format documentation.
689 """
691 """
690 ui.write(_(b"loading report file '%s'\n") % from_report)
692 ui.write(_(b"loading report file '%s'\n") % from_report)
691
693
692 with context(), open(from_report, mode='rb') as f:
694 with context(), open(from_report, mode='rb') as f:
693 for line in f.read().split(b'\n'):
695 for line in f.read().split(b'\n'):
694 if not line:
696 if not line:
695 continue
697 continue
696 filenodes, filename = line.split(b' ', 1)
698 filenodes, filename = line.split(b' ', 1)
697 fl = _filelog_from_filename(repo, filename)
699 fl = _filelog_from_filename(repo, filename)
698 to_fix = set(
700 to_fix = set(
699 fl.rev(binascii.unhexlify(n)) for n in filenodes.split(b',')
701 fl.rev(binascii.unhexlify(n)) for n in filenodes.split(b',')
700 )
702 )
701 excluded = set()
703 excluded = set()
702
704
703 for filerev in to_fix:
705 for filerev in to_fix:
704 if _is_revision_affected(fl, filerev):
706 if _is_revision_affected(fl, filerev):
705 msg = b"found affected revision %d for filelog '%s'\n"
707 msg = b"found affected revision %d for filelog '%s'\n"
706 ui.warn(msg % (filerev, filename))
708 ui.warn(msg % (filerev, filename))
707 else:
709 else:
708 msg = _(b"revision %s of file '%s' is not affected\n")
710 msg = _(b"revision %s of file '%s' is not affected\n")
709 msg %= (binascii.hexlify(fl.node(filerev)), filename)
711 msg %= (binascii.hexlify(fl.node(filerev)), filename)
710 ui.warn(msg)
712 ui.warn(msg)
711 excluded.add(filerev)
713 excluded.add(filerev)
712
714
713 to_fix = to_fix - excluded
715 to_fix = to_fix - excluded
714 if not to_fix:
716 if not to_fix:
715 msg = _(b"no affected revisions were found for '%s'\n")
717 msg = _(b"no affected revisions were found for '%s'\n")
716 ui.write(msg % filename)
718 ui.write(msg % filename)
717 continue
719 continue
718 if not dry_run:
720 if not dry_run:
719 _reorder_filelog_parents(repo, fl, sorted(to_fix))
721 _reorder_filelog_parents(repo, fl, sorted(to_fix))
720
722
721
723
724 def filter_delta_issue6528(revlog, deltas_iter):
725 """filter incomind deltas to repaire issue 6528 on the fly"""
726 metadata_cache = {}
727
728 deltacomputer = deltas.deltacomputer(revlog)
729
730 for rev, d in enumerate(deltas_iter, len(revlog)):
731 (
732 node,
733 p1_node,
734 p2_node,
735 linknode,
736 deltabase,
737 delta,
738 flags,
739 sidedata,
740 ) = d
741
742 if not revlog.index.has_node(deltabase):
743 raise error.LookupError(
744 deltabase, revlog.radix, _(b'unknown parent')
745 )
746 base_rev = revlog.rev(deltabase)
747 if not revlog.index.has_node(p1_node):
748 raise error.LookupError(p1_node, revlog.radix, _(b'unknown parent'))
749 p1_rev = revlog.rev(p1_node)
750 if not revlog.index.has_node(p2_node):
751 raise error.LookupError(p2_node, revlog.radix, _(b'unknown parent'))
752 p2_rev = revlog.rev(p2_node)
753
754 is_censored = lambda: bool(flags & REVIDX_ISCENSORED)
755 delta_base = lambda: revlog.rev(delta_base)
756 delta_base = lambda: base_rev
757 parent_revs = lambda: (p1_rev, p2_rev)
758
759 def full_text():
760 # note: being able to reuse the full text computation in the
761 # underlying addrevision would be useful however this is a bit too
762 # intrusive the for the "quick" issue6528 we are writing before the
763 # 5.8 release
764 textlen = mdiff.patchedsize(revlog.size(base_rev), delta)
765
766 revinfo = revlogutils.revisioninfo(
767 node,
768 p1_node,
769 p2_node,
770 [None],
771 textlen,
772 (base_rev, delta),
773 flags,
774 )
775 # cached by the global "writing" context
776 assert revlog._writinghandles is not None
777 if revlog._inline:
778 fh = revlog._writinghandles[0]
779 else:
780 fh = revlog._writinghandles[1]
781 return deltacomputer.buildtext(revinfo, fh)
782
783 is_affected = _is_revision_affected_fast_inner(
784 is_censored,
785 delta_base,
786 lambda: delta,
787 full_text,
788 parent_revs,
789 rev,
790 metadata_cache,
791 )
792 if is_affected:
793 d = (
794 node,
795 p2_node,
796 p1_node,
797 linknode,
798 deltabase,
799 delta,
800 flags,
801 sidedata,
802 )
803 yield d
804
805
722 def repair_issue6528(
806 def repair_issue6528(
723 ui, repo, dry_run=False, to_report=None, from_report=None, paranoid=False
807 ui, repo, dry_run=False, to_report=None, from_report=None, paranoid=False
724 ):
808 ):
725 from .. import store # avoid cycle
809 from .. import store # avoid cycle
726
810
727 @contextlib.contextmanager
811 @contextlib.contextmanager
728 def context():
812 def context():
729 if dry_run or to_report: # No need for locking
813 if dry_run or to_report: # No need for locking
730 yield
814 yield
731 else:
815 else:
732 with repo.wlock(), repo.lock():
816 with repo.wlock(), repo.lock():
733 yield
817 yield
734
818
735 if from_report:
819 if from_report:
736 return _from_report(ui, repo, context, from_report, dry_run)
820 return _from_report(ui, repo, context, from_report, dry_run)
737
821
738 report_entries = []
822 report_entries = []
739
823
740 with context():
824 with context():
741 files = list(
825 files = list(
742 (file_type, path)
826 (file_type, path)
743 for (file_type, path, _e, _s) in repo.store.datafiles()
827 for (file_type, path, _e, _s) in repo.store.datafiles()
744 if path.endswith(b'.i') and file_type & store.FILEFLAGS_FILELOG
828 if path.endswith(b'.i') and file_type & store.FILEFLAGS_FILELOG
745 )
829 )
746
830
747 progress = ui.makeprogress(
831 progress = ui.makeprogress(
748 _(b"looking for affected revisions"),
832 _(b"looking for affected revisions"),
749 unit=_(b"filelogs"),
833 unit=_(b"filelogs"),
750 total=len(files),
834 total=len(files),
751 )
835 )
752 found_nothing = True
836 found_nothing = True
753
837
754 for file_type, path in files:
838 for file_type, path in files:
755 if (
839 if (
756 not path.endswith(b'.i')
840 not path.endswith(b'.i')
757 or not file_type & store.FILEFLAGS_FILELOG
841 or not file_type & store.FILEFLAGS_FILELOG
758 ):
842 ):
759 continue
843 continue
760 progress.increment()
844 progress.increment()
761 filename = _get_filename_from_filelog_index(path)
845 filename = _get_filename_from_filelog_index(path)
762 fl = _filelog_from_filename(repo, filename)
846 fl = _filelog_from_filename(repo, filename)
763
847
764 # Set of filerevs (or hex filenodes if `to_report`) that need fixing
848 # Set of filerevs (or hex filenodes if `to_report`) that need fixing
765 to_fix = set()
849 to_fix = set()
766 metadata_cache = {}
850 metadata_cache = {}
767 for filerev in fl.revs():
851 for filerev in fl.revs():
768 affected = _is_revision_affected_fast(
852 affected = _is_revision_affected_fast(
769 repo, fl, filerev, metadata_cache
853 repo, fl, filerev, metadata_cache
770 )
854 )
771 if paranoid:
855 if paranoid:
772 slow = _is_revision_affected(fl, filerev)
856 slow = _is_revision_affected(fl, filerev)
773 if slow != affected:
857 if slow != affected:
774 msg = _(b"paranoid check failed for '%s' at node %s")
858 msg = _(b"paranoid check failed for '%s' at node %s")
775 node = binascii.hexlify(fl.node(filerev))
859 node = binascii.hexlify(fl.node(filerev))
776 raise error.Abort(msg % (filename, node))
860 raise error.Abort(msg % (filename, node))
777 if affected:
861 if affected:
778 msg = b"found affected revision %d for filelog '%s'\n"
862 msg = b"found affected revision %d for filelog '%s'\n"
779 ui.warn(msg % (filerev, path))
863 ui.warn(msg % (filerev, path))
780 found_nothing = False
864 found_nothing = False
781 if not dry_run:
865 if not dry_run:
782 if to_report:
866 if to_report:
783 to_fix.add(binascii.hexlify(fl.node(filerev)))
867 to_fix.add(binascii.hexlify(fl.node(filerev)))
784 else:
868 else:
785 to_fix.add(filerev)
869 to_fix.add(filerev)
786
870
787 if to_fix:
871 if to_fix:
788 to_fix = sorted(to_fix)
872 to_fix = sorted(to_fix)
789 if to_report:
873 if to_report:
790 report_entries.append((filename, to_fix))
874 report_entries.append((filename, to_fix))
791 else:
875 else:
792 _reorder_filelog_parents(repo, fl, to_fix)
876 _reorder_filelog_parents(repo, fl, to_fix)
793
877
794 if found_nothing:
878 if found_nothing:
795 ui.write(_(b"no affected revisions were found\n"))
879 ui.write(_(b"no affected revisions were found\n"))
796
880
797 if to_report and report_entries:
881 if to_report and report_entries:
798 with open(to_report, mode="wb") as f:
882 with open(to_report, mode="wb") as f:
799 for path, to_fix in report_entries:
883 for path, to_fix in report_entries:
800 f.write(b"%s %s\n" % (b",".join(to_fix), path))
884 f.write(b"%s %s\n" % (b",".join(to_fix), path))
801
885
802 progress.complete()
886 progress.complete()
@@ -1,433 +1,526
1 ===============================================================
1 ===============================================================
2 Test non-regression on the corruption associated with issue6528
2 Test non-regression on the corruption associated with issue6528
3 ===============================================================
3 ===============================================================
4
4
5 Setup
5 Setup
6 =====
6 =====
7
7
8 $ hg init base-repo
8 $ hg init base-repo
9 $ cd base-repo
9 $ cd base-repo
10
10
11 $ cat <<EOF > a.txt
11 $ cat <<EOF > a.txt
12 > 1
12 > 1
13 > 2
13 > 2
14 > 3
14 > 3
15 > 4
15 > 4
16 > 5
16 > 5
17 > 6
17 > 6
18 > EOF
18 > EOF
19
19
20 $ hg add a.txt
20 $ hg add a.txt
21 $ hg commit -m 'c_base_c - create a.txt'
21 $ hg commit -m 'c_base_c - create a.txt'
22
22
23 Modify a.txt
23 Modify a.txt
24
24
25 $ sed -e 's/1/foo/' a.txt > a.tmp; mv a.tmp a.txt
25 $ sed -e 's/1/foo/' a.txt > a.tmp; mv a.tmp a.txt
26 $ hg commit -m 'c_modify_c - modify a.txt'
26 $ hg commit -m 'c_modify_c - modify a.txt'
27
27
28 Modify and rename a.txt to b.txt
28 Modify and rename a.txt to b.txt
29
29
30 $ hg up -r "desc('c_base_c')"
30 $ hg up -r "desc('c_base_c')"
31 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
31 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
32 $ sed -e 's/6/bar/' a.txt > a.tmp; mv a.tmp a.txt
32 $ sed -e 's/6/bar/' a.txt > a.tmp; mv a.tmp a.txt
33 $ hg mv a.txt b.txt
33 $ hg mv a.txt b.txt
34 $ hg commit -m 'c_rename_c - rename and modify a.txt to b.txt'
34 $ hg commit -m 'c_rename_c - rename and modify a.txt to b.txt'
35 created new head
35 created new head
36
36
37 Merge each branch
37 Merge each branch
38
38
39 $ hg merge -r "desc('c_modify_c')"
39 $ hg merge -r "desc('c_modify_c')"
40 merging b.txt and a.txt to b.txt
40 merging b.txt and a.txt to b.txt
41 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
41 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
42 (branch merge, don't forget to commit)
42 (branch merge, don't forget to commit)
43 $ hg commit -m 'c_merge_c: commit merge'
43 $ hg commit -m 'c_merge_c: commit merge'
44
44
45 $ hg debugrevlogindex b.txt
45 $ hg debugrevlogindex b.txt
46 rev linkrev nodeid p1 p2
46 rev linkrev nodeid p1 p2
47 0 2 05b806ebe5ea 000000000000 000000000000
47 0 2 05b806ebe5ea 000000000000 000000000000
48 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
48 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
49
49
50 Check commit Graph
50 Check commit Graph
51
51
52 $ hg log -G
52 $ hg log -G
53 @ changeset: 3:a1cc2bdca0aa
53 @ changeset: 3:a1cc2bdca0aa
54 |\ tag: tip
54 |\ tag: tip
55 | | parent: 2:615c6ccefd15
55 | | parent: 2:615c6ccefd15
56 | | parent: 1:373d507f4667
56 | | parent: 1:373d507f4667
57 | | user: test
57 | | user: test
58 | | date: Thu Jan 01 00:00:00 1970 +0000
58 | | date: Thu Jan 01 00:00:00 1970 +0000
59 | | summary: c_merge_c: commit merge
59 | | summary: c_merge_c: commit merge
60 | |
60 | |
61 | o changeset: 2:615c6ccefd15
61 | o changeset: 2:615c6ccefd15
62 | | parent: 0:f5a5a568022f
62 | | parent: 0:f5a5a568022f
63 | | user: test
63 | | user: test
64 | | date: Thu Jan 01 00:00:00 1970 +0000
64 | | date: Thu Jan 01 00:00:00 1970 +0000
65 | | summary: c_rename_c - rename and modify a.txt to b.txt
65 | | summary: c_rename_c - rename and modify a.txt to b.txt
66 | |
66 | |
67 o | changeset: 1:373d507f4667
67 o | changeset: 1:373d507f4667
68 |/ user: test
68 |/ user: test
69 | date: Thu Jan 01 00:00:00 1970 +0000
69 | date: Thu Jan 01 00:00:00 1970 +0000
70 | summary: c_modify_c - modify a.txt
70 | summary: c_modify_c - modify a.txt
71 |
71 |
72 o changeset: 0:f5a5a568022f
72 o changeset: 0:f5a5a568022f
73 user: test
73 user: test
74 date: Thu Jan 01 00:00:00 1970 +0000
74 date: Thu Jan 01 00:00:00 1970 +0000
75 summary: c_base_c - create a.txt
75 summary: c_base_c - create a.txt
76
76
77
77
78 $ hg cat -r . b.txt
78 $ hg cat -r . b.txt
79 foo
79 foo
80 2
80 2
81 3
81 3
82 4
82 4
83 5
83 5
84 bar
84 bar
85 $ cat b.txt
85 $ cat b.txt
86 foo
86 foo
87 2
87 2
88 3
88 3
89 4
89 4
90 5
90 5
91 bar
91 bar
92 $ cd ..
92 $ cd ..
93
93
94
94
95 Check the lack of corruption
95 Check the lack of corruption
96 ============================
96 ============================
97
97
98 $ hg clone --pull base-repo cloned
98 $ hg clone --pull base-repo cloned
99 requesting all changes
99 requesting all changes
100 adding changesets
100 adding changesets
101 adding manifests
101 adding manifests
102 adding file changes
102 adding file changes
103 added 4 changesets with 4 changes to 2 files
103 added 4 changesets with 4 changes to 2 files
104 new changesets f5a5a568022f:a1cc2bdca0aa
104 new changesets f5a5a568022f:a1cc2bdca0aa
105 updating to branch default
105 updating to branch default
106 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
106 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
107 $ cd cloned
107 $ cd cloned
108 $ hg up -r "desc('c_merge_c')"
108 $ hg up -r "desc('c_merge_c')"
109 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
109 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
110
110
111
111
112 Status is buggy, even with debugrebuilddirstate
112 Status is buggy, even with debugrebuilddirstate
113
113
114 $ hg cat -r . b.txt
114 $ hg cat -r . b.txt
115 foo
115 foo
116 2
116 2
117 3
117 3
118 4
118 4
119 5
119 5
120 bar
120 bar
121 $ cat b.txt
121 $ cat b.txt
122 foo
122 foo
123 2
123 2
124 3
124 3
125 4
125 4
126 5
126 5
127 bar
127 bar
128 $ hg status
128 $ hg status
129 $ hg debugrebuilddirstate
129 $ hg debugrebuilddirstate
130 $ hg status
130 $ hg status
131
131
132 the history was altered
132 the history was altered
133
133
134 in theory p1/p2 order does not matter but in practice p1 == nullid is used as a
134 in theory p1/p2 order does not matter but in practice p1 == nullid is used as a
135 marker that some metadata are present and should be fetched.
135 marker that some metadata are present and should be fetched.
136
136
137 $ hg debugrevlogindex b.txt
137 $ hg debugrevlogindex b.txt
138 rev linkrev nodeid p1 p2
138 rev linkrev nodeid p1 p2
139 0 2 05b806ebe5ea 000000000000 000000000000
139 0 2 05b806ebe5ea 000000000000 000000000000
140 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
140 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
141
141
142 Check commit Graph
142 Check commit Graph
143
143
144 $ hg log -G
144 $ hg log -G
145 @ changeset: 3:a1cc2bdca0aa
145 @ changeset: 3:a1cc2bdca0aa
146 |\ tag: tip
146 |\ tag: tip
147 | | parent: 2:615c6ccefd15
147 | | parent: 2:615c6ccefd15
148 | | parent: 1:373d507f4667
148 | | parent: 1:373d507f4667
149 | | user: test
149 | | user: test
150 | | date: Thu Jan 01 00:00:00 1970 +0000
150 | | date: Thu Jan 01 00:00:00 1970 +0000
151 | | summary: c_merge_c: commit merge
151 | | summary: c_merge_c: commit merge
152 | |
152 | |
153 | o changeset: 2:615c6ccefd15
153 | o changeset: 2:615c6ccefd15
154 | | parent: 0:f5a5a568022f
154 | | parent: 0:f5a5a568022f
155 | | user: test
155 | | user: test
156 | | date: Thu Jan 01 00:00:00 1970 +0000
156 | | date: Thu Jan 01 00:00:00 1970 +0000
157 | | summary: c_rename_c - rename and modify a.txt to b.txt
157 | | summary: c_rename_c - rename and modify a.txt to b.txt
158 | |
158 | |
159 o | changeset: 1:373d507f4667
159 o | changeset: 1:373d507f4667
160 |/ user: test
160 |/ user: test
161 | date: Thu Jan 01 00:00:00 1970 +0000
161 | date: Thu Jan 01 00:00:00 1970 +0000
162 | summary: c_modify_c - modify a.txt
162 | summary: c_modify_c - modify a.txt
163 |
163 |
164 o changeset: 0:f5a5a568022f
164 o changeset: 0:f5a5a568022f
165 user: test
165 user: test
166 date: Thu Jan 01 00:00:00 1970 +0000
166 date: Thu Jan 01 00:00:00 1970 +0000
167 summary: c_base_c - create a.txt
167 summary: c_base_c - create a.txt
168
168
169
169
170 Test the command that fixes the issue
170 Test the command that fixes the issue
171 =====================================
171 =====================================
172
172
173 Restore a broken repository with multiple broken revisions and a filename that
173 Restore a broken repository with multiple broken revisions and a filename that
174 would get encoded to test the `report` options.
174 would get encoded to test the `report` options.
175 It's a tarball because unbundle might magically fix the issue later.
175 It's a tarball because unbundle might magically fix the issue later.
176
176
177 $ cd ..
177 $ cd ..
178 $ mkdir repo-to-fix
178 $ mkdir repo-to-fix
179 $ cd repo-to-fix
179 $ cd repo-to-fix
180 #if windows
180 #if windows
181 tar interprets `:` in paths (like `C:`) as being remote, force local on Windows
181 tar interprets `:` in paths (like `C:`) as being remote, force local on Windows
182 only since some versions of tar don't have this flag.
182 only since some versions of tar don't have this flag.
183
183
184 $ tar --force-local -xf $TESTDIR/bundles/issue6528.tar
184 $ tar --force-local -xf $TESTDIR/bundles/issue6528.tar
185 #else
185 #else
186 $ tar xf $TESTDIR/bundles/issue6528.tar
186 $ tar xf $TESTDIR/bundles/issue6528.tar
187 #endif
187 #endif
188
188
189 Check that the issue is present
189 Check that the issue is present
190 $ hg st
190 $ hg st
191 M D.txt
191 M D.txt
192 M b.txt
192 M b.txt
193 $ hg debugrevlogindex b.txt
193 $ hg debugrevlogindex b.txt
194 rev linkrev nodeid p1 p2
194 rev linkrev nodeid p1 p2
195 0 2 05b806ebe5ea 000000000000 000000000000
195 0 2 05b806ebe5ea 000000000000 000000000000
196 1 3 a58b36ad6b65 05b806ebe5ea 000000000000
196 1 3 a58b36ad6b65 05b806ebe5ea 000000000000
197 2 6 216a5fe8b8ed 000000000000 000000000000
197 2 6 216a5fe8b8ed 000000000000 000000000000
198 3 7 ea4f2f2463cc 216a5fe8b8ed 000000000000
198 3 7 ea4f2f2463cc 216a5fe8b8ed 000000000000
199 $ hg debugrevlogindex D.txt
199 $ hg debugrevlogindex D.txt
200 rev linkrev nodeid p1 p2
200 rev linkrev nodeid p1 p2
201 0 6 2a8d3833f2fb 000000000000 000000000000
201 0 6 2a8d3833f2fb 000000000000 000000000000
202 1 7 2a80419dfc31 2a8d3833f2fb 000000000000
202 1 7 2a80419dfc31 2a8d3833f2fb 000000000000
203
203
204 Dry-run the fix
204 Dry-run the fix
205 $ hg debug-repair-issue6528 --dry-run
205 $ hg debug-repair-issue6528 --dry-run
206 found affected revision 1 for filelog 'data/D.txt.i'
206 found affected revision 1 for filelog 'data/D.txt.i'
207 found affected revision 1 for filelog 'data/b.txt.i'
207 found affected revision 1 for filelog 'data/b.txt.i'
208 found affected revision 3 for filelog 'data/b.txt.i'
208 found affected revision 3 for filelog 'data/b.txt.i'
209 $ hg st
209 $ hg st
210 M D.txt
210 M D.txt
211 M b.txt
211 M b.txt
212 $ hg debugrevlogindex b.txt
212 $ hg debugrevlogindex b.txt
213 rev linkrev nodeid p1 p2
213 rev linkrev nodeid p1 p2
214 0 2 05b806ebe5ea 000000000000 000000000000
214 0 2 05b806ebe5ea 000000000000 000000000000
215 1 3 a58b36ad6b65 05b806ebe5ea 000000000000
215 1 3 a58b36ad6b65 05b806ebe5ea 000000000000
216 2 6 216a5fe8b8ed 000000000000 000000000000
216 2 6 216a5fe8b8ed 000000000000 000000000000
217 3 7 ea4f2f2463cc 216a5fe8b8ed 000000000000
217 3 7 ea4f2f2463cc 216a5fe8b8ed 000000000000
218 $ hg debugrevlogindex D.txt
218 $ hg debugrevlogindex D.txt
219 rev linkrev nodeid p1 p2
219 rev linkrev nodeid p1 p2
220 0 6 2a8d3833f2fb 000000000000 000000000000
220 0 6 2a8d3833f2fb 000000000000 000000000000
221 1 7 2a80419dfc31 2a8d3833f2fb 000000000000
221 1 7 2a80419dfc31 2a8d3833f2fb 000000000000
222
222
223 Test the --paranoid option
223 Test the --paranoid option
224 $ hg debug-repair-issue6528 --dry-run --paranoid
224 $ hg debug-repair-issue6528 --dry-run --paranoid
225 found affected revision 1 for filelog 'data/D.txt.i'
225 found affected revision 1 for filelog 'data/D.txt.i'
226 found affected revision 1 for filelog 'data/b.txt.i'
226 found affected revision 1 for filelog 'data/b.txt.i'
227 found affected revision 3 for filelog 'data/b.txt.i'
227 found affected revision 3 for filelog 'data/b.txt.i'
228 $ hg st
228 $ hg st
229 M D.txt
229 M D.txt
230 M b.txt
230 M b.txt
231 $ hg debugrevlogindex b.txt
231 $ hg debugrevlogindex b.txt
232 rev linkrev nodeid p1 p2
232 rev linkrev nodeid p1 p2
233 0 2 05b806ebe5ea 000000000000 000000000000
233 0 2 05b806ebe5ea 000000000000 000000000000
234 1 3 a58b36ad6b65 05b806ebe5ea 000000000000
234 1 3 a58b36ad6b65 05b806ebe5ea 000000000000
235 2 6 216a5fe8b8ed 000000000000 000000000000
235 2 6 216a5fe8b8ed 000000000000 000000000000
236 3 7 ea4f2f2463cc 216a5fe8b8ed 000000000000
236 3 7 ea4f2f2463cc 216a5fe8b8ed 000000000000
237 $ hg debugrevlogindex D.txt
237 $ hg debugrevlogindex D.txt
238 rev linkrev nodeid p1 p2
238 rev linkrev nodeid p1 p2
239 0 6 2a8d3833f2fb 000000000000 000000000000
239 0 6 2a8d3833f2fb 000000000000 000000000000
240 1 7 2a80419dfc31 2a8d3833f2fb 000000000000
240 1 7 2a80419dfc31 2a8d3833f2fb 000000000000
241
241
242 Run the fix
242 Run the fix
243 $ hg debug-repair-issue6528
243 $ hg debug-repair-issue6528
244 found affected revision 1 for filelog 'data/D.txt.i'
244 found affected revision 1 for filelog 'data/D.txt.i'
245 repaired revision 1 of 'filelog data/D.txt.i'
245 repaired revision 1 of 'filelog data/D.txt.i'
246 found affected revision 1 for filelog 'data/b.txt.i'
246 found affected revision 1 for filelog 'data/b.txt.i'
247 found affected revision 3 for filelog 'data/b.txt.i'
247 found affected revision 3 for filelog 'data/b.txt.i'
248 repaired revision 1 of 'filelog data/b.txt.i'
248 repaired revision 1 of 'filelog data/b.txt.i'
249 repaired revision 3 of 'filelog data/b.txt.i'
249 repaired revision 3 of 'filelog data/b.txt.i'
250
250
251 Check that the fix worked and that running it twice does nothing
251 Check that the fix worked and that running it twice does nothing
252 $ hg st
252 $ hg st
253 $ hg debugrevlogindex b.txt
253 $ hg debugrevlogindex b.txt
254 rev linkrev nodeid p1 p2
254 rev linkrev nodeid p1 p2
255 0 2 05b806ebe5ea 000000000000 000000000000
255 0 2 05b806ebe5ea 000000000000 000000000000
256 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
256 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
257 2 6 216a5fe8b8ed 000000000000 000000000000
257 2 6 216a5fe8b8ed 000000000000 000000000000
258 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
258 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
259 $ hg debugrevlogindex D.txt
259 $ hg debugrevlogindex D.txt
260 rev linkrev nodeid p1 p2
260 rev linkrev nodeid p1 p2
261 0 6 2a8d3833f2fb 000000000000 000000000000
261 0 6 2a8d3833f2fb 000000000000 000000000000
262 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
262 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
263 $ hg debug-repair-issue6528
263 $ hg debug-repair-issue6528
264 no affected revisions were found
264 no affected revisions were found
265 $ hg st
265 $ hg st
266 $ hg debugrevlogindex b.txt
266 $ hg debugrevlogindex b.txt
267 rev linkrev nodeid p1 p2
267 rev linkrev nodeid p1 p2
268 0 2 05b806ebe5ea 000000000000 000000000000
268 0 2 05b806ebe5ea 000000000000 000000000000
269 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
269 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
270 2 6 216a5fe8b8ed 000000000000 000000000000
270 2 6 216a5fe8b8ed 000000000000 000000000000
271 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
271 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
272 $ hg debugrevlogindex D.txt
272 $ hg debugrevlogindex D.txt
273 rev linkrev nodeid p1 p2
273 rev linkrev nodeid p1 p2
274 0 6 2a8d3833f2fb 000000000000 000000000000
274 0 6 2a8d3833f2fb 000000000000 000000000000
275 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
275 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
276
276
277 Try the using the report options
277 Try the using the report options
278 --------------------------------
278 --------------------------------
279
279
280 $ cd ..
280 $ cd ..
281 $ mkdir repo-to-fix-report
281 $ mkdir repo-to-fix-report
282 $ cd repo-to-fix
282 $ cd repo-to-fix
283 #if windows
283 #if windows
284 tar interprets `:` in paths (like `C:`) as being remote, force local on Windows
284 tar interprets `:` in paths (like `C:`) as being remote, force local on Windows
285 only since some versions of tar don't have this flag.
285 only since some versions of tar don't have this flag.
286
286
287 $ tar --force-local -xf $TESTDIR/bundles/issue6528.tar
287 $ tar --force-local -xf $TESTDIR/bundles/issue6528.tar
288 #else
288 #else
289 $ tar xf $TESTDIR/bundles/issue6528.tar
289 $ tar xf $TESTDIR/bundles/issue6528.tar
290 #endif
290 #endif
291
291
292 $ hg debug-repair-issue6528 --to-report $TESTTMP/report.txt
292 $ hg debug-repair-issue6528 --to-report $TESTTMP/report.txt
293 found affected revision 1 for filelog 'data/D.txt.i'
293 found affected revision 1 for filelog 'data/D.txt.i'
294 found affected revision 1 for filelog 'data/b.txt.i'
294 found affected revision 1 for filelog 'data/b.txt.i'
295 found affected revision 3 for filelog 'data/b.txt.i'
295 found affected revision 3 for filelog 'data/b.txt.i'
296 $ cat $TESTTMP/report.txt
296 $ cat $TESTTMP/report.txt
297 2a80419dfc31d7dfb308ac40f3f138282de7d73b D.txt
297 2a80419dfc31d7dfb308ac40f3f138282de7d73b D.txt
298 a58b36ad6b6545195952793099613c2116f3563b,ea4f2f2463cca5b29ddf3461012b8ce5c6dac175 b.txt
298 a58b36ad6b6545195952793099613c2116f3563b,ea4f2f2463cca5b29ddf3461012b8ce5c6dac175 b.txt
299
299
300 $ hg debug-repair-issue6528 --from-report $TESTTMP/report.txt --dry-run
300 $ hg debug-repair-issue6528 --from-report $TESTTMP/report.txt --dry-run
301 loading report file '$TESTTMP/report.txt'
301 loading report file '$TESTTMP/report.txt'
302 found affected revision 1 for filelog 'D.txt'
302 found affected revision 1 for filelog 'D.txt'
303 found affected revision 1 for filelog 'b.txt'
303 found affected revision 1 for filelog 'b.txt'
304 found affected revision 3 for filelog 'b.txt'
304 found affected revision 3 for filelog 'b.txt'
305 $ hg st
305 $ hg st
306 M D.txt
306 M D.txt
307 M b.txt
307 M b.txt
308 $ hg debugrevlogindex b.txt
308 $ hg debugrevlogindex b.txt
309 rev linkrev nodeid p1 p2
309 rev linkrev nodeid p1 p2
310 0 2 05b806ebe5ea 000000000000 000000000000
310 0 2 05b806ebe5ea 000000000000 000000000000
311 1 3 a58b36ad6b65 05b806ebe5ea 000000000000
311 1 3 a58b36ad6b65 05b806ebe5ea 000000000000
312 2 6 216a5fe8b8ed 000000000000 000000000000
312 2 6 216a5fe8b8ed 000000000000 000000000000
313 3 7 ea4f2f2463cc 216a5fe8b8ed 000000000000
313 3 7 ea4f2f2463cc 216a5fe8b8ed 000000000000
314 $ hg debugrevlogindex D.txt
314 $ hg debugrevlogindex D.txt
315 rev linkrev nodeid p1 p2
315 rev linkrev nodeid p1 p2
316 0 6 2a8d3833f2fb 000000000000 000000000000
316 0 6 2a8d3833f2fb 000000000000 000000000000
317 1 7 2a80419dfc31 2a8d3833f2fb 000000000000
317 1 7 2a80419dfc31 2a8d3833f2fb 000000000000
318
318
319 $ hg debug-repair-issue6528 --from-report $TESTTMP/report.txt
319 $ hg debug-repair-issue6528 --from-report $TESTTMP/report.txt
320 loading report file '$TESTTMP/report.txt'
320 loading report file '$TESTTMP/report.txt'
321 found affected revision 1 for filelog 'D.txt'
321 found affected revision 1 for filelog 'D.txt'
322 repaired revision 1 of 'filelog data/D.txt.i'
322 repaired revision 1 of 'filelog data/D.txt.i'
323 found affected revision 1 for filelog 'b.txt'
323 found affected revision 1 for filelog 'b.txt'
324 found affected revision 3 for filelog 'b.txt'
324 found affected revision 3 for filelog 'b.txt'
325 repaired revision 1 of 'filelog data/b.txt.i'
325 repaired revision 1 of 'filelog data/b.txt.i'
326 repaired revision 3 of 'filelog data/b.txt.i'
326 repaired revision 3 of 'filelog data/b.txt.i'
327 $ hg st
327 $ hg st
328 $ hg debugrevlogindex b.txt
328 $ hg debugrevlogindex b.txt
329 rev linkrev nodeid p1 p2
329 rev linkrev nodeid p1 p2
330 0 2 05b806ebe5ea 000000000000 000000000000
330 0 2 05b806ebe5ea 000000000000 000000000000
331 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
331 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
332 2 6 216a5fe8b8ed 000000000000 000000000000
332 2 6 216a5fe8b8ed 000000000000 000000000000
333 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
333 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
334 $ hg debugrevlogindex D.txt
334 $ hg debugrevlogindex D.txt
335 rev linkrev nodeid p1 p2
335 rev linkrev nodeid p1 p2
336 0 6 2a8d3833f2fb 000000000000 000000000000
336 0 6 2a8d3833f2fb 000000000000 000000000000
337 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
337 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
338
338
339 Check that the revision is not "fixed" again
339 Check that the revision is not "fixed" again
340
340
341 $ hg debug-repair-issue6528 --from-report $TESTTMP/report.txt
341 $ hg debug-repair-issue6528 --from-report $TESTTMP/report.txt
342 loading report file '$TESTTMP/report.txt'
342 loading report file '$TESTTMP/report.txt'
343 revision 2a80419dfc31d7dfb308ac40f3f138282de7d73b of file 'D.txt' is not affected
343 revision 2a80419dfc31d7dfb308ac40f3f138282de7d73b of file 'D.txt' is not affected
344 no affected revisions were found for 'D.txt'
344 no affected revisions were found for 'D.txt'
345 revision a58b36ad6b6545195952793099613c2116f3563b of file 'b.txt' is not affected
345 revision a58b36ad6b6545195952793099613c2116f3563b of file 'b.txt' is not affected
346 revision ea4f2f2463cca5b29ddf3461012b8ce5c6dac175 of file 'b.txt' is not affected
346 revision ea4f2f2463cca5b29ddf3461012b8ce5c6dac175 of file 'b.txt' is not affected
347 no affected revisions were found for 'b.txt'
347 no affected revisions were found for 'b.txt'
348 $ hg st
348 $ hg st
349 $ hg debugrevlogindex b.txt
349 $ hg debugrevlogindex b.txt
350 rev linkrev nodeid p1 p2
350 rev linkrev nodeid p1 p2
351 0 2 05b806ebe5ea 000000000000 000000000000
351 0 2 05b806ebe5ea 000000000000 000000000000
352 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
352 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
353 2 6 216a5fe8b8ed 000000000000 000000000000
353 2 6 216a5fe8b8ed 000000000000 000000000000
354 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
354 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
355 $ hg debugrevlogindex D.txt
355 $ hg debugrevlogindex D.txt
356 rev linkrev nodeid p1 p2
356 rev linkrev nodeid p1 p2
357 0 6 2a8d3833f2fb 000000000000 000000000000
357 0 6 2a8d3833f2fb 000000000000 000000000000
358 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
358 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
359
359
360 Try it with a non-inline revlog
360 Try it with a non-inline revlog
361 -------------------------------
361 -------------------------------
362
362
363 $ cd ..
363 $ cd ..
364 $ mkdir $TESTTMP/ext
364 $ mkdir $TESTTMP/ext
365 $ cat << EOF > $TESTTMP/ext/small_inline.py
365 $ cat << EOF > $TESTTMP/ext/small_inline.py
366 > from mercurial import revlog
366 > from mercurial import revlog
367 > revlog._maxinline = 8
367 > revlog._maxinline = 8
368 > EOF
368 > EOF
369
369
370 $ cat << EOF >> $HGRCPATH
370 $ cat << EOF >> $HGRCPATH
371 > [extensions]
371 > [extensions]
372 > small_inline=$TESTTMP/ext/small_inline.py
372 > small_inline=$TESTTMP/ext/small_inline.py
373 > EOF
373 > EOF
374
374
375 $ mkdir repo-to-fix-not-inline
375 $ mkdir repo-to-fix-not-inline
376 $ cd repo-to-fix-not-inline
376 $ cd repo-to-fix-not-inline
377 #if windows
377 #if windows
378 tar interprets `:` in paths (like `C:`) as being remote, force local on Windows
378 tar interprets `:` in paths (like `C:`) as being remote, force local on Windows
379 only since some versions of tar don't have this flag.
379 only since some versions of tar don't have this flag.
380
380
381 $ tar --force-local -xf $TESTDIR/bundles/issue6528.tar
381 $ tar --force-local -xf $TESTDIR/bundles/issue6528.tar
382 #else
382 #else
383 $ tar xf $TESTDIR/bundles/issue6528.tar
383 $ tar xf $TESTDIR/bundles/issue6528.tar
384 #endif
384 #endif
385 $ echo b >> b.txt
385 $ echo b >> b.txt
386 $ hg commit -qm "inline -> separate"
386 $ hg commit -qm "inline -> separate"
387 $ find .hg -name *b.txt.d
387 $ find .hg -name *b.txt.d
388 .hg/store/data/b.txt.d
388 .hg/store/data/b.txt.d
389
389
390 Status is correct, but the problem is still there, in the earlier revision
390 Status is correct, but the problem is still there, in the earlier revision
391 $ hg st
391 $ hg st
392 $ hg up 3
392 $ hg up 3
393 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
393 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
394 $ hg st
394 $ hg st
395 M b.txt
395 M b.txt
396 $ hg debugrevlogindex b.txt
396 $ hg debugrevlogindex b.txt
397 rev linkrev nodeid p1 p2
397 rev linkrev nodeid p1 p2
398 0 2 05b806ebe5ea 000000000000 000000000000
398 0 2 05b806ebe5ea 000000000000 000000000000
399 1 3 a58b36ad6b65 05b806ebe5ea 000000000000
399 1 3 a58b36ad6b65 05b806ebe5ea 000000000000
400 2 6 216a5fe8b8ed 000000000000 000000000000
400 2 6 216a5fe8b8ed 000000000000 000000000000
401 3 7 ea4f2f2463cc 216a5fe8b8ed 000000000000
401 3 7 ea4f2f2463cc 216a5fe8b8ed 000000000000
402 4 8 db234885e2fe ea4f2f2463cc 000000000000
402 4 8 db234885e2fe ea4f2f2463cc 000000000000
403 $ hg debugrevlogindex D.txt
403 $ hg debugrevlogindex D.txt
404 rev linkrev nodeid p1 p2
404 rev linkrev nodeid p1 p2
405 0 6 2a8d3833f2fb 000000000000 000000000000
405 0 6 2a8d3833f2fb 000000000000 000000000000
406 1 7 2a80419dfc31 2a8d3833f2fb 000000000000
406 1 7 2a80419dfc31 2a8d3833f2fb 000000000000
407 2 8 65aecc89bb5d 2a80419dfc31 000000000000
407 2 8 65aecc89bb5d 2a80419dfc31 000000000000
408
408
409 Run the fix on the non-inline revlog
409 Run the fix on the non-inline revlog
410 $ hg debug-repair-issue6528
410 $ hg debug-repair-issue6528
411 found affected revision 1 for filelog 'data/D.txt.i'
411 found affected revision 1 for filelog 'data/D.txt.i'
412 repaired revision 1 of 'filelog data/D.txt.i'
412 repaired revision 1 of 'filelog data/D.txt.i'
413 found affected revision 1 for filelog 'data/b.txt.i'
413 found affected revision 1 for filelog 'data/b.txt.i'
414 found affected revision 3 for filelog 'data/b.txt.i'
414 found affected revision 3 for filelog 'data/b.txt.i'
415 repaired revision 1 of 'filelog data/b.txt.i'
415 repaired revision 1 of 'filelog data/b.txt.i'
416 repaired revision 3 of 'filelog data/b.txt.i'
416 repaired revision 3 of 'filelog data/b.txt.i'
417
417
418 Check that it worked
418 Check that it worked
419 $ hg debugrevlogindex b.txt
419 $ hg debugrevlogindex b.txt
420 rev linkrev nodeid p1 p2
420 rev linkrev nodeid p1 p2
421 0 2 05b806ebe5ea 000000000000 000000000000
421 0 2 05b806ebe5ea 000000000000 000000000000
422 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
422 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
423 2 6 216a5fe8b8ed 000000000000 000000000000
423 2 6 216a5fe8b8ed 000000000000 000000000000
424 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
424 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
425 4 8 db234885e2fe ea4f2f2463cc 000000000000
425 4 8 db234885e2fe ea4f2f2463cc 000000000000
426 $ hg debugrevlogindex D.txt
426 $ hg debugrevlogindex D.txt
427 rev linkrev nodeid p1 p2
427 rev linkrev nodeid p1 p2
428 0 6 2a8d3833f2fb 000000000000 000000000000
428 0 6 2a8d3833f2fb 000000000000 000000000000
429 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
429 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
430 2 8 65aecc89bb5d 2a80419dfc31 000000000000
430 2 8 65aecc89bb5d 2a80419dfc31 000000000000
431 $ hg debug-repair-issue6528
431 $ hg debug-repair-issue6528
432 no affected revisions were found
432 no affected revisions were found
433 $ hg st
433 $ hg st
434
435 $ cd ..
436
437 Applying a bad bundle should fix it on the fly
438 ----------------------------------------------
439
440 from a v1 bundle
441 ~~~~~~~~~~~~~~~~
442
443 $ hg debugbundle --spec "$TESTDIR"/bundles/issue6528.hg-v1
444 bzip2-v1
445
446 $ hg init unbundle-v1
447 $ cd unbundle-v1
448
449 $ hg unbundle "$TESTDIR"/bundles/issue6528.hg-v1
450 adding changesets
451 adding manifests
452 adding file changes
453 added 8 changesets with 12 changes to 4 files
454 new changesets f5a5a568022f:3beabb508514 (8 drafts)
455 (run 'hg update' to get a working copy)
456
457 Check that revision were fixed on the fly
458
459 $ hg debugrevlogindex b.txt
460 rev linkrev nodeid p1 p2
461 0 2 05b806ebe5ea 000000000000 000000000000
462 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
463 2 6 216a5fe8b8ed 000000000000 000000000000
464 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
465
466 $ hg debugrevlogindex D.txt
467 rev linkrev nodeid p1 p2
468 0 6 2a8d3833f2fb 000000000000 000000000000
469 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
470
471 That we don't see the symptoms of the bug
472
473 $ hg up -- -1
474 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
475 $ hg status
476
477 And that the repair command does not find anything to fix
478
479 $ hg debug-repair-issue6528
480 no affected revisions were found
481
482 $ cd ..
483
484 from a v2 bundle
485 ~~~~~~~~~~~~~~~~
486
487 $ hg debugbundle --spec "$TESTDIR"/bundles/issue6528.hg-v2
488 bzip2-v2
489
490 $ hg init unbundle-v2
491 $ cd unbundle-v2
492
493 $ hg unbundle "$TESTDIR"/bundles/issue6528.hg-v2
494 adding changesets
495 adding manifests
496 adding file changes
497 added 8 changesets with 12 changes to 4 files
498 new changesets f5a5a568022f:3beabb508514 (8 drafts)
499 (run 'hg update' to get a working copy)
500
501 Check that revision were fixed on the fly
502
503 $ hg debugrevlogindex b.txt
504 rev linkrev nodeid p1 p2
505 0 2 05b806ebe5ea 000000000000 000000000000
506 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
507 2 6 216a5fe8b8ed 000000000000 000000000000
508 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
509
510 $ hg debugrevlogindex D.txt
511 rev linkrev nodeid p1 p2
512 0 6 2a8d3833f2fb 000000000000 000000000000
513 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
514
515 That we don't see the symptoms of the bug
516
517 $ hg up -- -1
518 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
519 $ hg status
520
521 And that the repair command does not find anything to fix
522
523 $ hg debug-repair-issue6528
524 no affected revisions were found
525
526 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now