##// END OF EJS Templates
rhg: disambiguate status without decompressing filelog if possible...
Simon Sapin -
r49378:e91aa800 default
parent child Browse files
Show More
@@ -1,194 +1,195 b''
1 # flagutils.py - code to deal with revlog flags and their processors
1 # flagutils.py - code to deal with revlog flags and their processors
2 #
2 #
3 # Copyright 2016 Remi Chaintron <remi@fb.com>
3 # Copyright 2016 Remi Chaintron <remi@fb.com>
4 # Copyright 2016-2019 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
4 # Copyright 2016-2019 Pierre-Yves David <pierre-yves.david@ens-lyon.org>
5 #
5 #
6 # This software may be used and distributed according to the terms of the
6 # This software may be used and distributed according to the terms of the
7 # GNU General Public License version 2 or any later version.
7 # GNU General Public License version 2 or any later version.
8
8
9 from __future__ import absolute_import
9 from __future__ import absolute_import
10
10
11 from ..i18n import _
11 from ..i18n import _
12
12
13 from .constants import (
13 from .constants import (
14 REVIDX_DEFAULT_FLAGS,
14 REVIDX_DEFAULT_FLAGS,
15 REVIDX_ELLIPSIS,
15 REVIDX_ELLIPSIS,
16 REVIDX_EXTSTORED,
16 REVIDX_EXTSTORED,
17 REVIDX_FLAGS_ORDER,
17 REVIDX_FLAGS_ORDER,
18 REVIDX_HASCOPIESINFO,
18 REVIDX_HASCOPIESINFO,
19 REVIDX_ISCENSORED,
19 REVIDX_ISCENSORED,
20 REVIDX_RAWTEXT_CHANGING_FLAGS,
20 REVIDX_RAWTEXT_CHANGING_FLAGS,
21 )
21 )
22
22
23 from .. import error, util
23 from .. import error, util
24
24
# Blanked usage of all the names to prevent pyflakes constraints.
# We need these names available in the module for extensions.
REVIDX_ISCENSORED
REVIDX_ELLIPSIS
REVIDX_EXTSTORED
# NOTE: this previously carried a stray trailing comma, which built a
# useless one-element tuple instead of a bare name reference like its
# siblings above.
REVIDX_HASCOPIESINFO
REVIDX_DEFAULT_FLAGS
REVIDX_FLAGS_ORDER
REVIDX_RAWTEXT_CHANGING_FLAGS

# Keep this in sync with REVIDX_KNOWN_FLAGS in rust/hg-core/src/revlog/revlog.rs
REVIDX_KNOWN_FLAGS = util.bitsfrom(REVIDX_FLAGS_ORDER)

# Store flag processors (cf. 'addflagprocessor()' to register).
# A None value means the flag is known but handled without a transform.
flagprocessors = {
    REVIDX_ISCENSORED: None,
    REVIDX_HASCOPIESINFO: None,
}
42
43
43
44
def addflagprocessor(flag, processor):
    """Register ``processor`` to handle revisions carrying ``flag``.

    Requirements:
    - ``flag`` must appear in both REVIDX_KNOWN_FLAGS and
      REVIDX_FLAGS_ORDER (and in REVIDX_RAWTEXT_CHANGING_FLAGS when the
      processor can alter the stored rawtext).
    - at most one processor may be registered per flag.
    - ``processor`` is a 3-tuple of functions ``(read, write, raw)``:
      - read(self, rawtext) -> (text, bool)
      - write(self, text) -> (rawtext, bool)
      - raw(self, rawtext) -> bool
      ``text`` is what is presented to the user, while ``rawtext`` is what
      gets stored in revlog data. The boolean each transform returns tells
      whether its output may be used for hash integrity checking: if
      ``write`` returns False, the hash is generated from ``text``; if it
      returns True, the hash is generated from its ``rawtext`` result.
      ``read`` and ``write`` usually return opposite booleans, and ``raw``
      returns the same boolean as ``write``.

    The ``raw`` transform is used for changegroup generation and in some
    debug commands; there it only indicates whether the contents are
    usable for hash integrity checks.
    """
    insertflagprocessor(flag, processor, flagprocessors)
70
71
71
72
def insertflagprocessor(flag, processor, flagprocessors):
    """Validate ``flag`` and record ``processor`` in ``flagprocessors``.

    Raises ProgrammingError for unknown/unordered flags and Abort when a
    processor is already registered for ``flag``.
    """
    if not flag & REVIDX_KNOWN_FLAGS:
        raise error.ProgrammingError(
            _(b"cannot register processor on unknown flag '%#x'.") % flag
        )
    if flag not in REVIDX_FLAGS_ORDER:
        raise error.ProgrammingError(
            _(b"flag '%#x' undefined in REVIDX_FLAGS_ORDER.") % flag
        )
    if flag in flagprocessors:
        raise error.Abort(
            _(b"cannot register multiple processors on flag '%#x'.") % flag
        )
    flagprocessors[flag] = processor
83
84
84
85
def processflagswrite(revlog, text, flags):
    """Apply registered 'write' flag transforms to revision data.

    ``text`` - the revision data to process
    ``flags`` - the revision flags

    Flags are walked in the reverse of REVIDX_FLAGS_ORDER (write
    transforms run in the opposite order of read transforms), invoking
    the processor registered for each flag that is set. The order in
    REVIDX_FLAGS_ORDER must stay stable because the transforms are not
    commutative.

    Returns a 2-tuple ``(text, validatehash)``: the transformed text and
    a bool indicating whether the result should be checked for hash
    integrity.
    """
    return _processflagsfunc(revlog, text, flags, b'write')[:2]
107
108
108
109
def processflagsread(revlog, text, flags):
    """Inspect revision data flags and applies read transformations defined
    by registered flag processors.

    ``text`` - the revision data to process
    ``flags`` - the revision flags

    This method processes the flags in the order defined by
    REVIDX_FLAGS_ORDER, applying the flag processors registered for present
    flags. The order of flags defined in REVIDX_FLAGS_ORDER needs to be
    stable to allow non-commutativity.

    Returns a 2-tuple of ``(text, validatehash)`` where ``text`` is the
    processed text and ``validatehash`` is a bool indicating whether the
    returned text should be checked for hash integrity.
    """
    return _processflagsfunc(revlog, text, flags, b'read')
128
129
129
130
def processflagsraw(revlog, text, flags):
    """Inspect revision data flags to check if the content hash should be
    validated.

    ``text`` - the revision data to process
    ``flags`` - the revision flags

    This method processes the flags in the order defined by
    REVIDX_FLAGS_ORDER, invoking the 'raw' transform of the processor
    registered for each present flag.

    Returns a bool indicating whether the text should be checked for hash
    integrity (the 'raw' transforms report this without modifying the
    text; only the boolean half of the internal result is returned).
    """
    return _processflagsfunc(revlog, text, flags, b'raw')[1]
147
148
148
149
def _processflagsfunc(revlog, text, flags, operation):
    """internal function to process flags on a revlog

    This function is private to this module, code should never need to call
    it directly.

    ``operation`` must be one of b'read', b'write' or b'raw'. Returns a
    2-tuple ``(text, validatehash)``; for b'raw' the text is returned
    unchanged (raw transforms only report the boolean).
    """
    # fast path: no flag processors will run
    if flags == 0:
        return text, True
    if operation not in (b'read', b'write', b'raw'):
        raise error.ProgrammingError(_(b"invalid '%s' operation") % operation)
    # Check all flags are known.
    if flags & ~REVIDX_KNOWN_FLAGS:
        raise revlog._flagserrorclass(
            _(b"incompatible revision flag '%#x'")
            % (flags & ~REVIDX_KNOWN_FLAGS)
        )
    validatehash = True
    # Depending on the operation (read or write), the order might be
    # reversed due to non-commutative transforms.
    orderedflags = REVIDX_FLAGS_ORDER
    if operation == b'write':
        orderedflags = reversed(orderedflags)

    for flag in orderedflags:
        # If a flagprocessor has been registered for a known flag, apply the
        # related operation transform and update result tuple.
        if flag & flags:
            vhash = True

            # A set flag without a registered processor is an error.
            if flag not in revlog._flagprocessors:
                message = _(b"missing processor for flag '%#x'") % flag
                raise revlog._flagserrorclass(message)

            processor = revlog._flagprocessors[flag]
            # A None processor marks a known flag that needs no transform
            # (e.g. the entries registered with None in ``flagprocessors``).
            if processor is not None:
                readtransform, writetransform, rawtransform = processor

                if operation == b'raw':
                    vhash = rawtransform(revlog, text)
                elif operation == b'read':
                    text, vhash = readtransform(revlog, text)
                else:  # write operation
                    text, vhash = writetransform(revlog, text)
                # A single failing transform taints the whole result.
                validatehash = validatehash and vhash

    return text, validatehash
@@ -1,117 +1,200 b''
1 use crate::errors::HgError;
1 use crate::errors::HgError;
2 use crate::repo::Repo;
2 use crate::repo::Repo;
3 use crate::revlog::path_encode::path_encode;
3 use crate::revlog::path_encode::path_encode;
4 use crate::revlog::revlog::RevlogEntry;
4 use crate::revlog::revlog::RevlogEntry;
5 use crate::revlog::revlog::{Revlog, RevlogError};
5 use crate::revlog::revlog::{Revlog, RevlogError};
6 use crate::revlog::NodePrefix;
6 use crate::revlog::NodePrefix;
7 use crate::revlog::Revision;
7 use crate::revlog::Revision;
8 use crate::utils::files::get_path_from_bytes;
8 use crate::utils::files::get_path_from_bytes;
9 use crate::utils::hg_path::HgPath;
9 use crate::utils::hg_path::HgPath;
10 use crate::utils::SliceExt;
10 use crate::utils::SliceExt;
11 use std::path::PathBuf;
11 use std::path::PathBuf;
12
12
/// A specialized `Revlog` to work with file data logs.
pub struct Filelog {
    /// The generic `revlog` format backing this filelog.
    revlog: Revlog,
}
18
18
19 impl Filelog {
19 impl Filelog {
20 pub fn open(repo: &Repo, file_path: &HgPath) -> Result<Self, HgError> {
20 pub fn open(repo: &Repo, file_path: &HgPath) -> Result<Self, HgError> {
21 let index_path = store_path(file_path, b".i");
21 let index_path = store_path(file_path, b".i");
22 let data_path = store_path(file_path, b".d");
22 let data_path = store_path(file_path, b".d");
23 let revlog = Revlog::open(repo, index_path, Some(&data_path))?;
23 let revlog = Revlog::open(repo, index_path, Some(&data_path))?;
24 Ok(Self { revlog })
24 Ok(Self { revlog })
25 }
25 }
26
26
27 /// The given node ID is that of the file as found in a filelog, not of a
27 /// The given node ID is that of the file as found in a filelog, not of a
28 /// changeset.
28 /// changeset.
29 pub fn data_for_node(
29 pub fn data_for_node(
30 &self,
30 &self,
31 file_node: impl Into<NodePrefix>,
31 file_node: impl Into<NodePrefix>,
32 ) -> Result<FilelogRevisionData, RevlogError> {
32 ) -> Result<FilelogRevisionData, RevlogError> {
33 let file_rev = self.revlog.rev_from_node(file_node.into())?;
33 let file_rev = self.revlog.rev_from_node(file_node.into())?;
34 self.data_for_rev(file_rev)
34 self.data_for_rev(file_rev)
35 }
35 }
36
36
37 /// The given revision is that of the file as found in a filelog, not of a
37 /// The given revision is that of the file as found in a filelog, not of a
38 /// changeset.
38 /// changeset.
39 pub fn data_for_rev(
39 pub fn data_for_rev(
40 &self,
40 &self,
41 file_rev: Revision,
41 file_rev: Revision,
42 ) -> Result<FilelogRevisionData, RevlogError> {
42 ) -> Result<FilelogRevisionData, RevlogError> {
43 let data: Vec<u8> = self.revlog.get_rev_data(file_rev)?.into_owned();
43 let data: Vec<u8> = self.revlog.get_rev_data(file_rev)?.into_owned();
44 Ok(FilelogRevisionData(data.into()))
44 Ok(FilelogRevisionData(data.into()))
45 }
45 }
46
46
47 /// The given node ID is that of the file as found in a filelog, not of a
47 /// The given node ID is that of the file as found in a filelog, not of a
48 /// changeset.
48 /// changeset.
49 pub fn entry_for_node(
49 pub fn entry_for_node(
50 &self,
50 &self,
51 file_node: impl Into<NodePrefix>,
51 file_node: impl Into<NodePrefix>,
52 ) -> Result<FilelogEntry, RevlogError> {
52 ) -> Result<FilelogEntry, RevlogError> {
53 let file_rev = self.revlog.rev_from_node(file_node.into())?;
53 let file_rev = self.revlog.rev_from_node(file_node.into())?;
54 self.entry_for_rev(file_rev)
54 self.entry_for_rev(file_rev)
55 }
55 }
56
56
57 /// The given revision is that of the file as found in a filelog, not of a
57 /// The given revision is that of the file as found in a filelog, not of a
58 /// changeset.
58 /// changeset.
59 pub fn entry_for_rev(
59 pub fn entry_for_rev(
60 &self,
60 &self,
61 file_rev: Revision,
61 file_rev: Revision,
62 ) -> Result<FilelogEntry, RevlogError> {
62 ) -> Result<FilelogEntry, RevlogError> {
63 Ok(FilelogEntry(self.revlog.get_entry(file_rev)?))
63 Ok(FilelogEntry(self.revlog.get_entry(file_rev)?))
64 }
64 }
65 }
65 }
66
66
67 fn store_path(hg_path: &HgPath, suffix: &[u8]) -> PathBuf {
67 fn store_path(hg_path: &HgPath, suffix: &[u8]) -> PathBuf {
68 let encoded_bytes =
68 let encoded_bytes =
69 path_encode(&[b"data/", hg_path.as_bytes(), suffix].concat());
69 path_encode(&[b"data/", hg_path.as_bytes(), suffix].concat());
70 get_path_from_bytes(&encoded_bytes).into()
70 get_path_from_bytes(&encoded_bytes).into()
71 }
71 }
72
72
/// One revision of a filelog, wrapping the generic revlog entry.
pub struct FilelogEntry<'a>(RevlogEntry<'a>);
74
74
impl FilelogEntry<'_> {
    /// `self.data()` can be expensive, with decompression and delta
    /// resolution.
    ///
    /// *Without* paying this cost, based on revlog index information
    /// including `RevlogEntry::uncompressed_len`:
    ///
    /// * Returns `true` if the length that `self.data().file_data().len()`
    ///   would return is definitely **not equal** to `other_len`.
    /// * Returns `false` if available information is inconclusive.
    pub fn file_data_len_not_equal_to(&self, other_len: u64) -> bool {
        // Relevant code that implement this behavior in Python code:
        // basefilectx.cmp, filelog.size, storageutil.filerevisioncopied,
        // revlog.size, revlog.rawsize

        // Let’s call `file_data_len` what would be returned by
        // `self.data().file_data().len()`.

        // NOTE(review): "is_cencored" appears to be the method's spelling
        // in `RevlogEntry` — confirm against revlog.rs before "fixing" it.
        if self.0.is_cencored() {
            // A censored revision is treated as having empty file data.
            let file_data_len = 0;
            return other_len != file_data_len;
        }

        if self.0.has_length_affecting_flag_processor() {
            // We can’t conclude anything about `file_data_len`.
            return false;
        }

        // Revlog revisions (usually) have metadata for the size of
        // their data after decompression and delta resolution
        // as would be returned by `Revlog::get_rev_data`.
        //
        // For filelogs this is the file’s contents preceded by an optional
        // metadata block.
        let uncompressed_len = if let Some(l) = self.0.uncompressed_len() {
            l as u64
        } else {
            // The field was set to -1, the actual uncompressed len is unknown.
            // We need to decompress to say more.
            return false;
        };
        // `uncompressed_len = file_data_len + optional_metadata_len`,
        // so `file_data_len <= uncompressed_len`.
        if uncompressed_len < other_len {
            // Transitively, `file_data_len < other_len`.
            // So `other_len != file_data_len` definitely.
            return true;
        }

        if uncompressed_len == other_len + 4 {
            // It’s possible that `file_data_len == other_len` with an empty
            // metadata block (2 start marker bytes + 2 end marker bytes).
            // This happens when there wouldn’t otherwise be metadata, but
            // the first 2 bytes of file data happen to match a start marker
            // and would be ambiguous.
            return false;
        }

        if !self.0.has_p1() {
            // There may or may not be copy metadata, so we can’t deduce more
            // about `file_data_len` without computing file data.
            return false;
        }

        // Filelog ancestry is not meaningful in the way changelog ancestry is.
        // It only provides hints to delta generation.
        // p1 and p2 are set to null when making a copy or rename since
        // contents are likely unrelated to what might have previously existed
        // at the destination path.
        //
        // Conversely, since here p1 is non-null, there is no copy metadata.
        // Note that this reasoning may be invalidated in the presence of
        // merges made by some previous versions of Mercurial that
        // swapped p1 and p2. See <https://bz.mercurial-scm.org/show_bug.cgi?id=6528>
        // and `tests/test-issue6528.t`.
        //
        // Since copy metadata is currently the only kind of metadata
        // kept in revlog data of filelogs,
        // this `FilelogEntry` does not have such metadata:
        let file_data_len = uncompressed_len;

        return file_data_len != other_len;
    }

    /// Decompress and delta-resolve this entry’s full revision data
    /// (metadata block included, if any).
    pub fn data(&self) -> Result<FilelogRevisionData, HgError> {
        Ok(FilelogRevisionData(self.0.data()?.into_owned()))
    }
}
80
163
/// The data for one revision in a filelog, uncompressed and delta-resolved.
/// May begin with an optional metadata block delimited by `\x01\n` markers.
pub struct FilelogRevisionData(Vec<u8>);
83
166
84 impl FilelogRevisionData {
167 impl FilelogRevisionData {
85 /// Split into metadata and data
168 /// Split into metadata and data
86 pub fn split(&self) -> Result<(Option<&[u8]>, &[u8]), HgError> {
169 pub fn split(&self) -> Result<(Option<&[u8]>, &[u8]), HgError> {
87 const DELIMITER: &[u8; 2] = &[b'\x01', b'\n'];
170 const DELIMITER: &[u8; 2] = &[b'\x01', b'\n'];
88
171
89 if let Some(rest) = self.0.drop_prefix(DELIMITER) {
172 if let Some(rest) = self.0.drop_prefix(DELIMITER) {
90 if let Some((metadata, data)) = rest.split_2_by_slice(DELIMITER) {
173 if let Some((metadata, data)) = rest.split_2_by_slice(DELIMITER) {
91 Ok((Some(metadata), data))
174 Ok((Some(metadata), data))
92 } else {
175 } else {
93 Err(HgError::corrupted(
176 Err(HgError::corrupted(
94 "Missing metadata end delimiter in filelog entry",
177 "Missing metadata end delimiter in filelog entry",
95 ))
178 ))
96 }
179 }
97 } else {
180 } else {
98 Ok((None, &self.0))
181 Ok((None, &self.0))
99 }
182 }
100 }
183 }
101
184
102 /// Returns the file contents at this revision, stripped of any metadata
185 /// Returns the file contents at this revision, stripped of any metadata
103 pub fn file_data(&self) -> Result<&[u8], HgError> {
186 pub fn file_data(&self) -> Result<&[u8], HgError> {
104 let (_metadata, data) = self.split()?;
187 let (_metadata, data) = self.split()?;
105 Ok(data)
188 Ok(data)
106 }
189 }
107
190
108 /// Consume the entry, and convert it into data, discarding any metadata,
191 /// Consume the entry, and convert it into data, discarding any metadata,
109 /// if present.
192 /// if present.
110 pub fn into_file_data(self) -> Result<Vec<u8>, HgError> {
193 pub fn into_file_data(self) -> Result<Vec<u8>, HgError> {
111 if let (Some(_metadata), data) = self.split()? {
194 if let (Some(_metadata), data) = self.split()? {
112 Ok(data.to_owned())
195 Ok(data.to_owned())
113 } else {
196 } else {
114 Ok(self.0)
197 Ok(self.0)
115 }
198 }
116 }
199 }
117 }
200 }
@@ -1,524 +1,528 b''
1 use std::convert::TryInto;
1 use std::convert::TryInto;
2 use std::ops::Deref;
2 use std::ops::Deref;
3
3
4 use byteorder::{BigEndian, ByteOrder};
4 use byteorder::{BigEndian, ByteOrder};
5
5
6 use crate::errors::HgError;
6 use crate::errors::HgError;
7 use crate::revlog::node::Node;
7 use crate::revlog::node::Node;
8 use crate::revlog::{Revision, NULL_REVISION};
8 use crate::revlog::{Revision, NULL_REVISION};
9
9
/// Byte size of one fixed-width entry in a revlog index.
pub const INDEX_ENTRY_SIZE: usize = 64;
11
11
/// The first 4 bytes of a revlog index file: format flags and version.
pub struct IndexHeader {
    header_bytes: [u8; 4],
}
15
15
/// The flags half of an index header (the first two header bytes).
#[derive(Copy, Clone)]
pub struct IndexHeaderFlags {
    flags: u16,
}
20
20
21 /// Corresponds to the high bits of `_format_flags` in python
21 /// Corresponds to the high bits of `_format_flags` in python
22 impl IndexHeaderFlags {
22 impl IndexHeaderFlags {
23 /// Corresponds to FLAG_INLINE_DATA in python
23 /// Corresponds to FLAG_INLINE_DATA in python
24 pub fn is_inline(self) -> bool {
24 pub fn is_inline(self) -> bool {
25 return self.flags & 1 != 0;
25 return self.flags & 1 != 0;
26 }
26 }
27 /// Corresponds to FLAG_GENERALDELTA in python
27 /// Corresponds to FLAG_GENERALDELTA in python
28 pub fn uses_generaldelta(self) -> bool {
28 pub fn uses_generaldelta(self) -> bool {
29 return self.flags & 2 != 0;
29 return self.flags & 2 != 0;
30 }
30 }
31 }
31 }
32
32
33 /// Corresponds to the INDEX_HEADER structure,
33 /// Corresponds to the INDEX_HEADER structure,
34 /// which is parsed as a `header` variable in `_loadindex` in `revlog.py`
34 /// which is parsed as a `header` variable in `_loadindex` in `revlog.py`
35 impl IndexHeader {
35 impl IndexHeader {
36 fn format_flags(&self) -> IndexHeaderFlags {
36 fn format_flags(&self) -> IndexHeaderFlags {
37 // No "unknown flags" check here, unlike in python. Maybe there should
37 // No "unknown flags" check here, unlike in python. Maybe there should
38 // be.
38 // be.
39 return IndexHeaderFlags {
39 return IndexHeaderFlags {
40 flags: BigEndian::read_u16(&self.header_bytes[0..2]),
40 flags: BigEndian::read_u16(&self.header_bytes[0..2]),
41 };
41 };
42 }
42 }
43
43
44 /// The only revlog version currently supported by rhg.
44 /// The only revlog version currently supported by rhg.
45 const REVLOGV1: u16 = 1;
45 const REVLOGV1: u16 = 1;
46
46
47 /// Corresponds to `_format_version` in Python.
47 /// Corresponds to `_format_version` in Python.
48 fn format_version(&self) -> u16 {
48 fn format_version(&self) -> u16 {
49 return BigEndian::read_u16(&self.header_bytes[2..4]);
49 return BigEndian::read_u16(&self.header_bytes[2..4]);
50 }
50 }
51
51
52 const EMPTY_INDEX_HEADER: IndexHeader = IndexHeader {
52 const EMPTY_INDEX_HEADER: IndexHeader = IndexHeader {
53 // We treat an empty file as a valid index with no entries.
53 // We treat an empty file as a valid index with no entries.
54 // Here we make an arbitrary choice of what we assume the format of the
54 // Here we make an arbitrary choice of what we assume the format of the
55 // index to be (V1, using generaldelta).
55 // index to be (V1, using generaldelta).
56 // This doesn't matter too much, since we're only doing read-only
56 // This doesn't matter too much, since we're only doing read-only
57 // access. but the value corresponds to the `new_header` variable in
57 // access. but the value corresponds to the `new_header` variable in
58 // `revlog.py`, `_loadindex`
58 // `revlog.py`, `_loadindex`
59 header_bytes: [0, 3, 0, 1],
59 header_bytes: [0, 3, 0, 1],
60 };
60 };
61
61
62 fn parse(index_bytes: &[u8]) -> Result<IndexHeader, HgError> {
62 fn parse(index_bytes: &[u8]) -> Result<IndexHeader, HgError> {
63 if index_bytes.len() == 0 {
63 if index_bytes.len() == 0 {
64 return Ok(IndexHeader::EMPTY_INDEX_HEADER);
64 return Ok(IndexHeader::EMPTY_INDEX_HEADER);
65 }
65 }
66 if index_bytes.len() < 4 {
66 if index_bytes.len() < 4 {
67 return Err(HgError::corrupted(
67 return Err(HgError::corrupted(
68 "corrupted revlog: can't read the index format header",
68 "corrupted revlog: can't read the index format header",
69 ));
69 ));
70 }
70 }
71 return Ok(IndexHeader {
71 return Ok(IndexHeader {
72 header_bytes: {
72 header_bytes: {
73 let bytes: [u8; 4] =
73 let bytes: [u8; 4] =
74 index_bytes[0..4].try_into().expect("impossible");
74 index_bytes[0..4].try_into().expect("impossible");
75 bytes
75 bytes
76 },
76 },
77 });
77 });
78 }
78 }
79 }
79 }
80
80
/// A Revlog index
pub struct Index {
    /// The raw index content, behind a type-erased owning container.
    bytes: Box<dyn Deref<Target = [u8]> + Send>,
    /// Offsets of starts of index blocks.
    /// Only needed when the index is interleaved with data.
    offsets: Option<Vec<usize>>,
    /// Value of the general-delta flag from the index header.
    uses_generaldelta: bool,
}
89
89
90 impl Index {
90 impl Index {
91 /// Create an index from bytes.
91 /// Create an index from bytes.
92 /// Calculate the start of each entry when is_inline is true.
92 /// Calculate the start of each entry when is_inline is true.
93 pub fn new(
93 pub fn new(
94 bytes: Box<dyn Deref<Target = [u8]> + Send>,
94 bytes: Box<dyn Deref<Target = [u8]> + Send>,
95 ) -> Result<Self, HgError> {
95 ) -> Result<Self, HgError> {
96 let header = IndexHeader::parse(bytes.as_ref())?;
96 let header = IndexHeader::parse(bytes.as_ref())?;
97
97
98 if header.format_version() != IndexHeader::REVLOGV1 {
98 if header.format_version() != IndexHeader::REVLOGV1 {
99 // A proper new version should have had a repo/store
99 // A proper new version should have had a repo/store
100 // requirement.
100 // requirement.
101 return Err(HgError::corrupted("unsupported revlog version"));
101 return Err(HgError::corrupted("unsupported revlog version"));
102 }
102 }
103
103
104 // This is only correct because we know version is REVLOGV1.
104 // This is only correct because we know version is REVLOGV1.
105 // In v2 we always use generaldelta, while in v0 we never use
105 // In v2 we always use generaldelta, while in v0 we never use
106 // generaldelta. Similar for [is_inline] (it's only used in v1).
106 // generaldelta. Similar for [is_inline] (it's only used in v1).
107 let uses_generaldelta = header.format_flags().uses_generaldelta();
107 let uses_generaldelta = header.format_flags().uses_generaldelta();
108
108
109 if header.format_flags().is_inline() {
109 if header.format_flags().is_inline() {
110 let mut offset: usize = 0;
110 let mut offset: usize = 0;
111 let mut offsets = Vec::new();
111 let mut offsets = Vec::new();
112
112
113 while offset + INDEX_ENTRY_SIZE <= bytes.len() {
113 while offset + INDEX_ENTRY_SIZE <= bytes.len() {
114 offsets.push(offset);
114 offsets.push(offset);
115 let end = offset + INDEX_ENTRY_SIZE;
115 let end = offset + INDEX_ENTRY_SIZE;
116 let entry = IndexEntry {
116 let entry = IndexEntry {
117 bytes: &bytes[offset..end],
117 bytes: &bytes[offset..end],
118 offset_override: None,
118 offset_override: None,
119 };
119 };
120
120
121 offset += INDEX_ENTRY_SIZE + entry.compressed_len() as usize;
121 offset += INDEX_ENTRY_SIZE + entry.compressed_len() as usize;
122 }
122 }
123
123
124 if offset == bytes.len() {
124 if offset == bytes.len() {
125 Ok(Self {
125 Ok(Self {
126 bytes,
126 bytes,
127 offsets: Some(offsets),
127 offsets: Some(offsets),
128 uses_generaldelta,
128 uses_generaldelta,
129 })
129 })
130 } else {
130 } else {
131 Err(HgError::corrupted("unexpected inline revlog length")
131 Err(HgError::corrupted("unexpected inline revlog length")
132 .into())
132 .into())
133 }
133 }
134 } else {
134 } else {
135 Ok(Self {
135 Ok(Self {
136 bytes,
136 bytes,
137 offsets: None,
137 offsets: None,
138 uses_generaldelta,
138 uses_generaldelta,
139 })
139 })
140 }
140 }
141 }
141 }
142
142
143 pub fn uses_generaldelta(&self) -> bool {
143 pub fn uses_generaldelta(&self) -> bool {
144 self.uses_generaldelta
144 self.uses_generaldelta
145 }
145 }
146
146
147 /// Value of the inline flag.
147 /// Value of the inline flag.
148 pub fn is_inline(&self) -> bool {
148 pub fn is_inline(&self) -> bool {
149 self.offsets.is_some()
149 self.offsets.is_some()
150 }
150 }
151
151
152 /// Return a slice of bytes if `revlog` is inline. Panic if not.
152 /// Return a slice of bytes if `revlog` is inline. Panic if not.
153 pub fn data(&self, start: usize, end: usize) -> &[u8] {
153 pub fn data(&self, start: usize, end: usize) -> &[u8] {
154 if !self.is_inline() {
154 if !self.is_inline() {
155 panic!("tried to access data in the index of a revlog that is not inline");
155 panic!("tried to access data in the index of a revlog that is not inline");
156 }
156 }
157 &self.bytes[start..end]
157 &self.bytes[start..end]
158 }
158 }
159
159
160 /// Return number of entries of the revlog index.
160 /// Return number of entries of the revlog index.
161 pub fn len(&self) -> usize {
161 pub fn len(&self) -> usize {
162 if let Some(offsets) = &self.offsets {
162 if let Some(offsets) = &self.offsets {
163 offsets.len()
163 offsets.len()
164 } else {
164 } else {
165 self.bytes.len() / INDEX_ENTRY_SIZE
165 self.bytes.len() / INDEX_ENTRY_SIZE
166 }
166 }
167 }
167 }
168
168
169 /// Returns `true` if the `Index` has zero `entries`.
169 /// Returns `true` if the `Index` has zero `entries`.
170 pub fn is_empty(&self) -> bool {
170 pub fn is_empty(&self) -> bool {
171 self.len() == 0
171 self.len() == 0
172 }
172 }
173
173
174 /// Return the index entry corresponding to the given revision if it
174 /// Return the index entry corresponding to the given revision if it
175 /// exists.
175 /// exists.
176 pub fn get_entry(&self, rev: Revision) -> Option<IndexEntry> {
176 pub fn get_entry(&self, rev: Revision) -> Option<IndexEntry> {
177 if rev == NULL_REVISION {
177 if rev == NULL_REVISION {
178 return None;
178 return None;
179 }
179 }
180 if let Some(offsets) = &self.offsets {
180 if let Some(offsets) = &self.offsets {
181 self.get_entry_inline(rev, offsets)
181 self.get_entry_inline(rev, offsets)
182 } else {
182 } else {
183 self.get_entry_separated(rev)
183 self.get_entry_separated(rev)
184 }
184 }
185 }
185 }
186
186
187 fn get_entry_inline(
187 fn get_entry_inline(
188 &self,
188 &self,
189 rev: Revision,
189 rev: Revision,
190 offsets: &[usize],
190 offsets: &[usize],
191 ) -> Option<IndexEntry> {
191 ) -> Option<IndexEntry> {
192 let start = *offsets.get(rev as usize)?;
192 let start = *offsets.get(rev as usize)?;
193 let end = start.checked_add(INDEX_ENTRY_SIZE)?;
193 let end = start.checked_add(INDEX_ENTRY_SIZE)?;
194 let bytes = &self.bytes[start..end];
194 let bytes = &self.bytes[start..end];
195
195
196 // See IndexEntry for an explanation of this override.
196 // See IndexEntry for an explanation of this override.
197 let offset_override = Some(end);
197 let offset_override = Some(end);
198
198
199 Some(IndexEntry {
199 Some(IndexEntry {
200 bytes,
200 bytes,
201 offset_override,
201 offset_override,
202 })
202 })
203 }
203 }
204
204
205 fn get_entry_separated(&self, rev: Revision) -> Option<IndexEntry> {
205 fn get_entry_separated(&self, rev: Revision) -> Option<IndexEntry> {
206 let max_rev = self.bytes.len() / INDEX_ENTRY_SIZE;
206 let max_rev = self.bytes.len() / INDEX_ENTRY_SIZE;
207 if rev as usize >= max_rev {
207 if rev as usize >= max_rev {
208 return None;
208 return None;
209 }
209 }
210 let start = rev as usize * INDEX_ENTRY_SIZE;
210 let start = rev as usize * INDEX_ENTRY_SIZE;
211 let end = start + INDEX_ENTRY_SIZE;
211 let end = start + INDEX_ENTRY_SIZE;
212 let bytes = &self.bytes[start..end];
212 let bytes = &self.bytes[start..end];
213
213
214 // Override the offset of the first revision as its bytes are used
214 // Override the offset of the first revision as its bytes are used
215 // for the index's metadata (saving space because it is always 0)
215 // for the index's metadata (saving space because it is always 0)
216 let offset_override = if rev == 0 { Some(0) } else { None };
216 let offset_override = if rev == 0 { Some(0) } else { None };
217
217
218 Some(IndexEntry {
218 Some(IndexEntry {
219 bytes,
219 bytes,
220 offset_override,
220 offset_override,
221 })
221 })
222 }
222 }
223 }
223 }
224
224
225 impl super::RevlogIndex for Index {
225 impl super::RevlogIndex for Index {
226 fn len(&self) -> usize {
226 fn len(&self) -> usize {
227 self.len()
227 self.len()
228 }
228 }
229
229
230 fn node(&self, rev: Revision) -> Option<&Node> {
230 fn node(&self, rev: Revision) -> Option<&Node> {
231 self.get_entry(rev).map(|entry| entry.hash())
231 self.get_entry(rev).map(|entry| entry.hash())
232 }
232 }
233 }
233 }
234
234
235 #[derive(Debug)]
235 #[derive(Debug)]
236 pub struct IndexEntry<'a> {
236 pub struct IndexEntry<'a> {
237 bytes: &'a [u8],
237 bytes: &'a [u8],
238 /// Allows to override the offset value of the entry.
238 /// Allows to override the offset value of the entry.
239 ///
239 ///
240 /// For interleaved index and data, the offset stored in the index
240 /// For interleaved index and data, the offset stored in the index
241 /// corresponds to the separated data offset.
241 /// corresponds to the separated data offset.
242 /// It has to be overridden with the actual offset in the interleaved
242 /// It has to be overridden with the actual offset in the interleaved
243 /// index which is just after the index block.
243 /// index which is just after the index block.
244 ///
244 ///
245 /// For separated index and data, the offset stored in the first index
245 /// For separated index and data, the offset stored in the first index
246 /// entry is mixed with the index headers.
246 /// entry is mixed with the index headers.
247 /// It has to be overridden with 0.
247 /// It has to be overridden with 0.
248 offset_override: Option<usize>,
248 offset_override: Option<usize>,
249 }
249 }
250
250
251 impl<'a> IndexEntry<'a> {
251 impl<'a> IndexEntry<'a> {
252 /// Return the offset of the data.
252 /// Return the offset of the data.
253 pub fn offset(&self) -> usize {
253 pub fn offset(&self) -> usize {
254 if let Some(offset_override) = self.offset_override {
254 if let Some(offset_override) = self.offset_override {
255 offset_override
255 offset_override
256 } else {
256 } else {
257 let mut bytes = [0; 8];
257 let mut bytes = [0; 8];
258 bytes[2..8].copy_from_slice(&self.bytes[0..=5]);
258 bytes[2..8].copy_from_slice(&self.bytes[0..=5]);
259 BigEndian::read_u64(&bytes[..]) as usize
259 BigEndian::read_u64(&bytes[..]) as usize
260 }
260 }
261 }
261 }
262
262
263 pub fn flags(&self) -> u16 {
264 BigEndian::read_u16(&self.bytes[6..=7])
265 }
266
263 /// Return the compressed length of the data.
267 /// Return the compressed length of the data.
264 pub fn compressed_len(&self) -> u32 {
268 pub fn compressed_len(&self) -> u32 {
265 BigEndian::read_u32(&self.bytes[8..=11])
269 BigEndian::read_u32(&self.bytes[8..=11])
266 }
270 }
267
271
268 /// Return the uncompressed length of the data.
272 /// Return the uncompressed length of the data.
269 pub fn uncompressed_len(&self) -> i32 {
273 pub fn uncompressed_len(&self) -> i32 {
270 BigEndian::read_i32(&self.bytes[12..=15])
274 BigEndian::read_i32(&self.bytes[12..=15])
271 }
275 }
272
276
273 /// Return the revision upon which the data has been derived.
277 /// Return the revision upon which the data has been derived.
274 pub fn base_revision_or_base_of_delta_chain(&self) -> Revision {
278 pub fn base_revision_or_base_of_delta_chain(&self) -> Revision {
275 // TODO Maybe return an Option when base_revision == rev?
279 // TODO Maybe return an Option when base_revision == rev?
276 // Requires to add rev to IndexEntry
280 // Requires to add rev to IndexEntry
277
281
278 BigEndian::read_i32(&self.bytes[16..])
282 BigEndian::read_i32(&self.bytes[16..])
279 }
283 }
280
284
281 pub fn p1(&self) -> Revision {
285 pub fn p1(&self) -> Revision {
282 BigEndian::read_i32(&self.bytes[24..])
286 BigEndian::read_i32(&self.bytes[24..])
283 }
287 }
284
288
285 pub fn p2(&self) -> Revision {
289 pub fn p2(&self) -> Revision {
286 BigEndian::read_i32(&self.bytes[28..])
290 BigEndian::read_i32(&self.bytes[28..])
287 }
291 }
288
292
289 /// Return the hash of revision's full text.
293 /// Return the hash of revision's full text.
290 ///
294 ///
291 /// Currently, SHA-1 is used and only the first 20 bytes of this field
295 /// Currently, SHA-1 is used and only the first 20 bytes of this field
292 /// are used.
296 /// are used.
293 pub fn hash(&self) -> &'a Node {
297 pub fn hash(&self) -> &'a Node {
294 (&self.bytes[32..52]).try_into().unwrap()
298 (&self.bytes[32..52]).try_into().unwrap()
295 }
299 }
296 }
300 }
297
301
298 #[cfg(test)]
302 #[cfg(test)]
299 mod tests {
303 mod tests {
300 use super::*;
304 use super::*;
301
305
302 #[cfg(test)]
306 #[cfg(test)]
303 #[derive(Debug, Copy, Clone)]
307 #[derive(Debug, Copy, Clone)]
304 pub struct IndexEntryBuilder {
308 pub struct IndexEntryBuilder {
305 is_first: bool,
309 is_first: bool,
306 is_inline: bool,
310 is_inline: bool,
307 is_general_delta: bool,
311 is_general_delta: bool,
308 version: u16,
312 version: u16,
309 offset: usize,
313 offset: usize,
310 compressed_len: usize,
314 compressed_len: usize,
311 uncompressed_len: usize,
315 uncompressed_len: usize,
312 base_revision_or_base_of_delta_chain: Revision,
316 base_revision_or_base_of_delta_chain: Revision,
313 }
317 }
314
318
315 #[cfg(test)]
319 #[cfg(test)]
316 impl IndexEntryBuilder {
320 impl IndexEntryBuilder {
317 pub fn new() -> Self {
321 pub fn new() -> Self {
318 Self {
322 Self {
319 is_first: false,
323 is_first: false,
320 is_inline: false,
324 is_inline: false,
321 is_general_delta: true,
325 is_general_delta: true,
322 version: 2,
326 version: 2,
323 offset: 0,
327 offset: 0,
324 compressed_len: 0,
328 compressed_len: 0,
325 uncompressed_len: 0,
329 uncompressed_len: 0,
326 base_revision_or_base_of_delta_chain: 0,
330 base_revision_or_base_of_delta_chain: 0,
327 }
331 }
328 }
332 }
329
333
330 pub fn is_first(&mut self, value: bool) -> &mut Self {
334 pub fn is_first(&mut self, value: bool) -> &mut Self {
331 self.is_first = value;
335 self.is_first = value;
332 self
336 self
333 }
337 }
334
338
335 pub fn with_inline(&mut self, value: bool) -> &mut Self {
339 pub fn with_inline(&mut self, value: bool) -> &mut Self {
336 self.is_inline = value;
340 self.is_inline = value;
337 self
341 self
338 }
342 }
339
343
340 pub fn with_general_delta(&mut self, value: bool) -> &mut Self {
344 pub fn with_general_delta(&mut self, value: bool) -> &mut Self {
341 self.is_general_delta = value;
345 self.is_general_delta = value;
342 self
346 self
343 }
347 }
344
348
345 pub fn with_version(&mut self, value: u16) -> &mut Self {
349 pub fn with_version(&mut self, value: u16) -> &mut Self {
346 self.version = value;
350 self.version = value;
347 self
351 self
348 }
352 }
349
353
350 pub fn with_offset(&mut self, value: usize) -> &mut Self {
354 pub fn with_offset(&mut self, value: usize) -> &mut Self {
351 self.offset = value;
355 self.offset = value;
352 self
356 self
353 }
357 }
354
358
355 pub fn with_compressed_len(&mut self, value: usize) -> &mut Self {
359 pub fn with_compressed_len(&mut self, value: usize) -> &mut Self {
356 self.compressed_len = value;
360 self.compressed_len = value;
357 self
361 self
358 }
362 }
359
363
360 pub fn with_uncompressed_len(&mut self, value: usize) -> &mut Self {
364 pub fn with_uncompressed_len(&mut self, value: usize) -> &mut Self {
361 self.uncompressed_len = value;
365 self.uncompressed_len = value;
362 self
366 self
363 }
367 }
364
368
365 pub fn with_base_revision_or_base_of_delta_chain(
369 pub fn with_base_revision_or_base_of_delta_chain(
366 &mut self,
370 &mut self,
367 value: Revision,
371 value: Revision,
368 ) -> &mut Self {
372 ) -> &mut Self {
369 self.base_revision_or_base_of_delta_chain = value;
373 self.base_revision_or_base_of_delta_chain = value;
370 self
374 self
371 }
375 }
372
376
373 pub fn build(&self) -> Vec<u8> {
377 pub fn build(&self) -> Vec<u8> {
374 let mut bytes = Vec::with_capacity(INDEX_ENTRY_SIZE);
378 let mut bytes = Vec::with_capacity(INDEX_ENTRY_SIZE);
375 if self.is_first {
379 if self.is_first {
376 bytes.extend(&match (self.is_general_delta, self.is_inline) {
380 bytes.extend(&match (self.is_general_delta, self.is_inline) {
377 (false, false) => [0u8, 0],
381 (false, false) => [0u8, 0],
378 (false, true) => [0u8, 1],
382 (false, true) => [0u8, 1],
379 (true, false) => [0u8, 2],
383 (true, false) => [0u8, 2],
380 (true, true) => [0u8, 3],
384 (true, true) => [0u8, 3],
381 });
385 });
382 bytes.extend(&self.version.to_be_bytes());
386 bytes.extend(&self.version.to_be_bytes());
383 // Remaining offset bytes.
387 // Remaining offset bytes.
384 bytes.extend(&[0u8; 2]);
388 bytes.extend(&[0u8; 2]);
385 } else {
389 } else {
386 // Offset stored on 48 bits (6 bytes)
390 // Offset stored on 48 bits (6 bytes)
387 bytes.extend(&(self.offset as u64).to_be_bytes()[2..]);
391 bytes.extend(&(self.offset as u64).to_be_bytes()[2..]);
388 }
392 }
389 bytes.extend(&[0u8; 2]); // Revision flags.
393 bytes.extend(&[0u8; 2]); // Revision flags.
390 bytes.extend(&(self.compressed_len as u32).to_be_bytes());
394 bytes.extend(&(self.compressed_len as u32).to_be_bytes());
391 bytes.extend(&(self.uncompressed_len as u32).to_be_bytes());
395 bytes.extend(&(self.uncompressed_len as u32).to_be_bytes());
392 bytes.extend(
396 bytes.extend(
393 &self.base_revision_or_base_of_delta_chain.to_be_bytes(),
397 &self.base_revision_or_base_of_delta_chain.to_be_bytes(),
394 );
398 );
395 bytes
399 bytes
396 }
400 }
397 }
401 }
398
402
399 pub fn is_inline(index_bytes: &[u8]) -> bool {
403 pub fn is_inline(index_bytes: &[u8]) -> bool {
400 IndexHeader::parse(index_bytes)
404 IndexHeader::parse(index_bytes)
401 .expect("too short")
405 .expect("too short")
402 .format_flags()
406 .format_flags()
403 .is_inline()
407 .is_inline()
404 }
408 }
405
409
406 pub fn uses_generaldelta(index_bytes: &[u8]) -> bool {
410 pub fn uses_generaldelta(index_bytes: &[u8]) -> bool {
407 IndexHeader::parse(index_bytes)
411 IndexHeader::parse(index_bytes)
408 .expect("too short")
412 .expect("too short")
409 .format_flags()
413 .format_flags()
410 .uses_generaldelta()
414 .uses_generaldelta()
411 }
415 }
412
416
413 pub fn get_version(index_bytes: &[u8]) -> u16 {
417 pub fn get_version(index_bytes: &[u8]) -> u16 {
414 IndexHeader::parse(index_bytes)
418 IndexHeader::parse(index_bytes)
415 .expect("too short")
419 .expect("too short")
416 .format_version()
420 .format_version()
417 }
421 }
418
422
419 #[test]
423 #[test]
420 fn flags_when_no_inline_flag_test() {
424 fn flags_when_no_inline_flag_test() {
421 let bytes = IndexEntryBuilder::new()
425 let bytes = IndexEntryBuilder::new()
422 .is_first(true)
426 .is_first(true)
423 .with_general_delta(false)
427 .with_general_delta(false)
424 .with_inline(false)
428 .with_inline(false)
425 .build();
429 .build();
426
430
427 assert_eq!(is_inline(&bytes), false);
431 assert_eq!(is_inline(&bytes), false);
428 assert_eq!(uses_generaldelta(&bytes), false);
432 assert_eq!(uses_generaldelta(&bytes), false);
429 }
433 }
430
434
431 #[test]
435 #[test]
432 fn flags_when_inline_flag_test() {
436 fn flags_when_inline_flag_test() {
433 let bytes = IndexEntryBuilder::new()
437 let bytes = IndexEntryBuilder::new()
434 .is_first(true)
438 .is_first(true)
435 .with_general_delta(false)
439 .with_general_delta(false)
436 .with_inline(true)
440 .with_inline(true)
437 .build();
441 .build();
438
442
439 assert_eq!(is_inline(&bytes), true);
443 assert_eq!(is_inline(&bytes), true);
440 assert_eq!(uses_generaldelta(&bytes), false);
444 assert_eq!(uses_generaldelta(&bytes), false);
441 }
445 }
442
446
443 #[test]
447 #[test]
444 fn flags_when_inline_and_generaldelta_flags_test() {
448 fn flags_when_inline_and_generaldelta_flags_test() {
445 let bytes = IndexEntryBuilder::new()
449 let bytes = IndexEntryBuilder::new()
446 .is_first(true)
450 .is_first(true)
447 .with_general_delta(true)
451 .with_general_delta(true)
448 .with_inline(true)
452 .with_inline(true)
449 .build();
453 .build();
450
454
451 assert_eq!(is_inline(&bytes), true);
455 assert_eq!(is_inline(&bytes), true);
452 assert_eq!(uses_generaldelta(&bytes), true);
456 assert_eq!(uses_generaldelta(&bytes), true);
453 }
457 }
454
458
455 #[test]
459 #[test]
456 fn test_offset() {
460 fn test_offset() {
457 let bytes = IndexEntryBuilder::new().with_offset(1).build();
461 let bytes = IndexEntryBuilder::new().with_offset(1).build();
458 let entry = IndexEntry {
462 let entry = IndexEntry {
459 bytes: &bytes,
463 bytes: &bytes,
460 offset_override: None,
464 offset_override: None,
461 };
465 };
462
466
463 assert_eq!(entry.offset(), 1)
467 assert_eq!(entry.offset(), 1)
464 }
468 }
465
469
466 #[test]
470 #[test]
467 fn test_with_overridden_offset() {
471 fn test_with_overridden_offset() {
468 let bytes = IndexEntryBuilder::new().with_offset(1).build();
472 let bytes = IndexEntryBuilder::new().with_offset(1).build();
469 let entry = IndexEntry {
473 let entry = IndexEntry {
470 bytes: &bytes,
474 bytes: &bytes,
471 offset_override: Some(2),
475 offset_override: Some(2),
472 };
476 };
473
477
474 assert_eq!(entry.offset(), 2)
478 assert_eq!(entry.offset(), 2)
475 }
479 }
476
480
477 #[test]
481 #[test]
478 fn test_compressed_len() {
482 fn test_compressed_len() {
479 let bytes = IndexEntryBuilder::new().with_compressed_len(1).build();
483 let bytes = IndexEntryBuilder::new().with_compressed_len(1).build();
480 let entry = IndexEntry {
484 let entry = IndexEntry {
481 bytes: &bytes,
485 bytes: &bytes,
482 offset_override: None,
486 offset_override: None,
483 };
487 };
484
488
485 assert_eq!(entry.compressed_len(), 1)
489 assert_eq!(entry.compressed_len(), 1)
486 }
490 }
487
491
488 #[test]
492 #[test]
489 fn test_uncompressed_len() {
493 fn test_uncompressed_len() {
490 let bytes = IndexEntryBuilder::new().with_uncompressed_len(1).build();
494 let bytes = IndexEntryBuilder::new().with_uncompressed_len(1).build();
491 let entry = IndexEntry {
495 let entry = IndexEntry {
492 bytes: &bytes,
496 bytes: &bytes,
493 offset_override: None,
497 offset_override: None,
494 };
498 };
495
499
496 assert_eq!(entry.uncompressed_len(), 1)
500 assert_eq!(entry.uncompressed_len(), 1)
497 }
501 }
498
502
499 #[test]
503 #[test]
500 fn test_base_revision_or_base_of_delta_chain() {
504 fn test_base_revision_or_base_of_delta_chain() {
501 let bytes = IndexEntryBuilder::new()
505 let bytes = IndexEntryBuilder::new()
502 .with_base_revision_or_base_of_delta_chain(1)
506 .with_base_revision_or_base_of_delta_chain(1)
503 .build();
507 .build();
504 let entry = IndexEntry {
508 let entry = IndexEntry {
505 bytes: &bytes,
509 bytes: &bytes,
506 offset_override: None,
510 offset_override: None,
507 };
511 };
508
512
509 assert_eq!(entry.base_revision_or_base_of_delta_chain(), 1)
513 assert_eq!(entry.base_revision_or_base_of_delta_chain(), 1)
510 }
514 }
511
515
512 #[test]
516 #[test]
513 fn version_test() {
517 fn version_test() {
514 let bytes = IndexEntryBuilder::new()
518 let bytes = IndexEntryBuilder::new()
515 .is_first(true)
519 .is_first(true)
516 .with_version(1)
520 .with_version(1)
517 .build();
521 .build();
518
522
519 assert_eq!(get_version(&bytes), 1)
523 assert_eq!(get_version(&bytes), 1)
520 }
524 }
521 }
525 }
522
526
523 #[cfg(test)]
527 #[cfg(test)]
524 pub use tests::IndexEntryBuilder;
528 pub use tests::IndexEntryBuilder;
@@ -1,440 +1,468 b''
1 use std::borrow::Cow;
1 use std::borrow::Cow;
2 use std::convert::TryFrom;
2 use std::convert::TryFrom;
3 use std::io::Read;
3 use std::io::Read;
4 use std::ops::Deref;
4 use std::ops::Deref;
5 use std::path::Path;
5 use std::path::Path;
6
6
7 use flate2::read::ZlibDecoder;
7 use flate2::read::ZlibDecoder;
8 use micro_timer::timed;
8 use micro_timer::timed;
9 use sha1::{Digest, Sha1};
9 use sha1::{Digest, Sha1};
10 use zstd;
10 use zstd;
11
11
12 use super::index::Index;
12 use super::index::Index;
13 use super::node::{NodePrefix, NODE_BYTES_LENGTH, NULL_NODE};
13 use super::node::{NodePrefix, NODE_BYTES_LENGTH, NULL_NODE};
14 use super::nodemap;
14 use super::nodemap;
15 use super::nodemap::{NodeMap, NodeMapError};
15 use super::nodemap::{NodeMap, NodeMapError};
16 use super::nodemap_docket::NodeMapDocket;
16 use super::nodemap_docket::NodeMapDocket;
17 use super::patch;
17 use super::patch;
18 use crate::errors::HgError;
18 use crate::errors::HgError;
19 use crate::repo::Repo;
19 use crate::repo::Repo;
20 use crate::revlog::Revision;
20 use crate::revlog::Revision;
21 use crate::{Node, NULL_REVISION};
21 use crate::{Node, NULL_REVISION};
22
22
23 const REVISION_FLAG_CENSORED: u16 = 1 << 15;
24 const REVISION_FLAG_ELLIPSIS: u16 = 1 << 14;
25 const REVISION_FLAG_EXTSTORED: u16 = 1 << 13;
26 const REVISION_FLAG_HASCOPIESINFO: u16 = 1 << 12;
27
28 // Keep this in sync with REVIDX_KNOWN_FLAGS in
29 // mercurial/revlogutils/flagutil.py
30 const REVIDX_KNOWN_FLAGS: u16 = REVISION_FLAG_CENSORED
31 | REVISION_FLAG_ELLIPSIS
32 | REVISION_FLAG_EXTSTORED
33 | REVISION_FLAG_HASCOPIESINFO;
34
23 #[derive(derive_more::From)]
35 #[derive(derive_more::From)]
24 pub enum RevlogError {
36 pub enum RevlogError {
25 InvalidRevision,
37 InvalidRevision,
26 /// Working directory is not supported
38 /// Working directory is not supported
27 WDirUnsupported,
39 WDirUnsupported,
28 /// Found more than one entry whose ID match the requested prefix
40 /// Found more than one entry whose ID match the requested prefix
29 AmbiguousPrefix,
41 AmbiguousPrefix,
30 #[from]
42 #[from]
31 Other(HgError),
43 Other(HgError),
32 }
44 }
33
45
34 impl From<NodeMapError> for RevlogError {
46 impl From<NodeMapError> for RevlogError {
35 fn from(error: NodeMapError) -> Self {
47 fn from(error: NodeMapError) -> Self {
36 match error {
48 match error {
37 NodeMapError::MultipleResults => RevlogError::AmbiguousPrefix,
49 NodeMapError::MultipleResults => RevlogError::AmbiguousPrefix,
38 NodeMapError::RevisionNotInIndex(_) => RevlogError::corrupted(),
50 NodeMapError::RevisionNotInIndex(_) => RevlogError::corrupted(),
39 }
51 }
40 }
52 }
41 }
53 }
42
54
43 fn corrupted() -> HgError {
55 fn corrupted() -> HgError {
44 HgError::corrupted("corrupted revlog")
56 HgError::corrupted("corrupted revlog")
45 }
57 }
46
58
47 impl RevlogError {
59 impl RevlogError {
48 fn corrupted() -> Self {
60 fn corrupted() -> Self {
49 RevlogError::Other(corrupted())
61 RevlogError::Other(corrupted())
50 }
62 }
51 }
63 }
52
64
53 /// Read only implementation of revlog.
65 /// Read only implementation of revlog.
54 pub struct Revlog {
66 pub struct Revlog {
55 /// When index and data are not interleaved: bytes of the revlog index.
67 /// When index and data are not interleaved: bytes of the revlog index.
56 /// When index and data are interleaved: bytes of the revlog index and
68 /// When index and data are interleaved: bytes of the revlog index and
57 /// data.
69 /// data.
58 index: Index,
70 index: Index,
59 /// When index and data are not interleaved: bytes of the revlog data
71 /// When index and data are not interleaved: bytes of the revlog data
60 data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>>,
72 data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>>,
61 /// When present on disk: the persistent nodemap for this revlog
73 /// When present on disk: the persistent nodemap for this revlog
62 nodemap: Option<nodemap::NodeTree>,
74 nodemap: Option<nodemap::NodeTree>,
63 }
75 }
64
76
65 impl Revlog {
77 impl Revlog {
66 /// Open a revlog index file.
78 /// Open a revlog index file.
67 ///
79 ///
68 /// It will also open the associated data file if index and data are not
80 /// It will also open the associated data file if index and data are not
69 /// interleaved.
81 /// interleaved.
70 #[timed]
82 #[timed]
71 pub fn open(
83 pub fn open(
72 repo: &Repo,
84 repo: &Repo,
73 index_path: impl AsRef<Path>,
85 index_path: impl AsRef<Path>,
74 data_path: Option<&Path>,
86 data_path: Option<&Path>,
75 ) -> Result<Self, HgError> {
87 ) -> Result<Self, HgError> {
76 let index_path = index_path.as_ref();
88 let index_path = index_path.as_ref();
77 let index = {
89 let index = {
78 match repo.store_vfs().mmap_open_opt(&index_path)? {
90 match repo.store_vfs().mmap_open_opt(&index_path)? {
79 None => Index::new(Box::new(vec![])),
91 None => Index::new(Box::new(vec![])),
80 Some(index_mmap) => {
92 Some(index_mmap) => {
81 let index = Index::new(Box::new(index_mmap))?;
93 let index = Index::new(Box::new(index_mmap))?;
82 Ok(index)
94 Ok(index)
83 }
95 }
84 }
96 }
85 }?;
97 }?;
86
98
87 let default_data_path = index_path.with_extension("d");
99 let default_data_path = index_path.with_extension("d");
88
100
89 // type annotation required
101 // type annotation required
90 // won't recognize Mmap as Deref<Target = [u8]>
102 // won't recognize Mmap as Deref<Target = [u8]>
91 let data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>> =
103 let data_bytes: Option<Box<dyn Deref<Target = [u8]> + Send>> =
92 if index.is_inline() {
104 if index.is_inline() {
93 None
105 None
94 } else {
106 } else {
95 let data_path = data_path.unwrap_or(&default_data_path);
107 let data_path = data_path.unwrap_or(&default_data_path);
96 let data_mmap = repo.store_vfs().mmap_open(data_path)?;
108 let data_mmap = repo.store_vfs().mmap_open(data_path)?;
97 Some(Box::new(data_mmap))
109 Some(Box::new(data_mmap))
98 };
110 };
99
111
100 let nodemap = if index.is_inline() {
112 let nodemap = if index.is_inline() {
101 None
113 None
102 } else {
114 } else {
103 NodeMapDocket::read_from_file(repo, index_path)?.map(
115 NodeMapDocket::read_from_file(repo, index_path)?.map(
104 |(docket, data)| {
116 |(docket, data)| {
105 nodemap::NodeTree::load_bytes(
117 nodemap::NodeTree::load_bytes(
106 Box::new(data),
118 Box::new(data),
107 docket.data_length,
119 docket.data_length,
108 )
120 )
109 },
121 },
110 )
122 )
111 };
123 };
112
124
113 Ok(Revlog {
125 Ok(Revlog {
114 index,
126 index,
115 data_bytes,
127 data_bytes,
116 nodemap,
128 nodemap,
117 })
129 })
118 }
130 }
119
131
120 /// Return number of entries of the `Revlog`.
132 /// Return number of entries of the `Revlog`.
121 pub fn len(&self) -> usize {
133 pub fn len(&self) -> usize {
122 self.index.len()
134 self.index.len()
123 }
135 }
124
136
125 /// Returns `true` if the `Revlog` has zero `entries`.
137 /// Returns `true` if the `Revlog` has zero `entries`.
126 pub fn is_empty(&self) -> bool {
138 pub fn is_empty(&self) -> bool {
127 self.index.is_empty()
139 self.index.is_empty()
128 }
140 }
129
141
130 /// Returns the node ID for the given revision number, if it exists in this
142 /// Returns the node ID for the given revision number, if it exists in this
131 /// revlog
143 /// revlog
132 pub fn node_from_rev(&self, rev: Revision) -> Option<&Node> {
144 pub fn node_from_rev(&self, rev: Revision) -> Option<&Node> {
133 if rev == NULL_REVISION {
145 if rev == NULL_REVISION {
134 return Some(&NULL_NODE);
146 return Some(&NULL_NODE);
135 }
147 }
136 Some(self.index.get_entry(rev)?.hash())
148 Some(self.index.get_entry(rev)?.hash())
137 }
149 }
138
150
139 /// Return the revision number for the given node ID, if it exists in this
151 /// Return the revision number for the given node ID, if it exists in this
140 /// revlog
152 /// revlog
141 #[timed]
153 #[timed]
142 pub fn rev_from_node(
154 pub fn rev_from_node(
143 &self,
155 &self,
144 node: NodePrefix,
156 node: NodePrefix,
145 ) -> Result<Revision, RevlogError> {
157 ) -> Result<Revision, RevlogError> {
146 if node.is_prefix_of(&NULL_NODE) {
158 if node.is_prefix_of(&NULL_NODE) {
147 return Ok(NULL_REVISION);
159 return Ok(NULL_REVISION);
148 }
160 }
149
161
150 if let Some(nodemap) = &self.nodemap {
162 if let Some(nodemap) = &self.nodemap {
151 return nodemap
163 return nodemap
152 .find_bin(&self.index, node)?
164 .find_bin(&self.index, node)?
153 .ok_or(RevlogError::InvalidRevision);
165 .ok_or(RevlogError::InvalidRevision);
154 }
166 }
155
167
156 // Fallback to linear scan when a persistent nodemap is not present.
168 // Fallback to linear scan when a persistent nodemap is not present.
157 // This happens when the persistent-nodemap experimental feature is not
169 // This happens when the persistent-nodemap experimental feature is not
158 // enabled, or for small revlogs.
170 // enabled, or for small revlogs.
159 //
171 //
160 // TODO: consider building a non-persistent nodemap in memory to
172 // TODO: consider building a non-persistent nodemap in memory to
161 // optimize these cases.
173 // optimize these cases.
162 let mut found_by_prefix = None;
174 let mut found_by_prefix = None;
163 for rev in (0..self.len() as Revision).rev() {
175 for rev in (0..self.len() as Revision).rev() {
164 let index_entry =
176 let index_entry =
165 self.index.get_entry(rev).ok_or(HgError::corrupted(
177 self.index.get_entry(rev).ok_or(HgError::corrupted(
166 "revlog references a revision not in the index",
178 "revlog references a revision not in the index",
167 ))?;
179 ))?;
168 if node == *index_entry.hash() {
180 if node == *index_entry.hash() {
169 return Ok(rev);
181 return Ok(rev);
170 }
182 }
171 if node.is_prefix_of(index_entry.hash()) {
183 if node.is_prefix_of(index_entry.hash()) {
172 if found_by_prefix.is_some() {
184 if found_by_prefix.is_some() {
173 return Err(RevlogError::AmbiguousPrefix);
185 return Err(RevlogError::AmbiguousPrefix);
174 }
186 }
175 found_by_prefix = Some(rev)
187 found_by_prefix = Some(rev)
176 }
188 }
177 }
189 }
178 found_by_prefix.ok_or(RevlogError::InvalidRevision)
190 found_by_prefix.ok_or(RevlogError::InvalidRevision)
179 }
191 }
180
192
181 /// Returns whether the given revision exists in this revlog.
193 /// Returns whether the given revision exists in this revlog.
182 pub fn has_rev(&self, rev: Revision) -> bool {
194 pub fn has_rev(&self, rev: Revision) -> bool {
183 self.index.get_entry(rev).is_some()
195 self.index.get_entry(rev).is_some()
184 }
196 }
185
197
186 /// Return the full data associated to a revision.
198 /// Return the full data associated to a revision.
187 ///
199 ///
188 /// All entries required to build the final data out of deltas will be
200 /// All entries required to build the final data out of deltas will be
189 /// retrieved as needed, and the deltas will be applied to the inital
201 /// retrieved as needed, and the deltas will be applied to the inital
190 /// snapshot to rebuild the final data.
202 /// snapshot to rebuild the final data.
191 #[timed]
203 #[timed]
192 pub fn get_rev_data(
204 pub fn get_rev_data(
193 &self,
205 &self,
194 rev: Revision,
206 rev: Revision,
195 ) -> Result<Cow<[u8]>, RevlogError> {
207 ) -> Result<Cow<[u8]>, RevlogError> {
196 if rev == NULL_REVISION {
208 if rev == NULL_REVISION {
197 return Ok(Cow::Borrowed(&[]));
209 return Ok(Cow::Borrowed(&[]));
198 };
210 };
199 Ok(self.get_entry(rev)?.data()?)
211 Ok(self.get_entry(rev)?.data()?)
200 }
212 }
201
213
202 /// Check the hash of some given data against the recorded hash.
214 /// Check the hash of some given data against the recorded hash.
203 pub fn check_hash(
215 pub fn check_hash(
204 &self,
216 &self,
205 p1: Revision,
217 p1: Revision,
206 p2: Revision,
218 p2: Revision,
207 expected: &[u8],
219 expected: &[u8],
208 data: &[u8],
220 data: &[u8],
209 ) -> bool {
221 ) -> bool {
210 let e1 = self.index.get_entry(p1);
222 let e1 = self.index.get_entry(p1);
211 let h1 = match e1 {
223 let h1 = match e1 {
212 Some(ref entry) => entry.hash(),
224 Some(ref entry) => entry.hash(),
213 None => &NULL_NODE,
225 None => &NULL_NODE,
214 };
226 };
215 let e2 = self.index.get_entry(p2);
227 let e2 = self.index.get_entry(p2);
216 let h2 = match e2 {
228 let h2 = match e2 {
217 Some(ref entry) => entry.hash(),
229 Some(ref entry) => entry.hash(),
218 None => &NULL_NODE,
230 None => &NULL_NODE,
219 };
231 };
220
232
221 &hash(data, h1.as_bytes(), h2.as_bytes()) == expected
233 &hash(data, h1.as_bytes(), h2.as_bytes()) == expected
222 }
234 }
223
235
224 /// Build the full data of a revision out its snapshot
236 /// Build the full data of a revision out its snapshot
225 /// and its deltas.
237 /// and its deltas.
226 #[timed]
238 #[timed]
227 fn build_data_from_deltas(
239 fn build_data_from_deltas(
228 snapshot: RevlogEntry,
240 snapshot: RevlogEntry,
229 deltas: &[RevlogEntry],
241 deltas: &[RevlogEntry],
230 ) -> Result<Vec<u8>, HgError> {
242 ) -> Result<Vec<u8>, HgError> {
231 let snapshot = snapshot.data_chunk()?;
243 let snapshot = snapshot.data_chunk()?;
232 let deltas = deltas
244 let deltas = deltas
233 .iter()
245 .iter()
234 .rev()
246 .rev()
235 .map(RevlogEntry::data_chunk)
247 .map(RevlogEntry::data_chunk)
236 .collect::<Result<Vec<_>, _>>()?;
248 .collect::<Result<Vec<_>, _>>()?;
237 let patches: Vec<_> =
249 let patches: Vec<_> =
238 deltas.iter().map(|d| patch::PatchList::new(d)).collect();
250 deltas.iter().map(|d| patch::PatchList::new(d)).collect();
239 let patch = patch::fold_patch_lists(&patches);
251 let patch = patch::fold_patch_lists(&patches);
240 Ok(patch.apply(&snapshot))
252 Ok(patch.apply(&snapshot))
241 }
253 }
242
254
243 /// Return the revlog data.
255 /// Return the revlog data.
244 fn data(&self) -> &[u8] {
256 fn data(&self) -> &[u8] {
245 match self.data_bytes {
257 match self.data_bytes {
246 Some(ref data_bytes) => &data_bytes,
258 Some(ref data_bytes) => &data_bytes,
247 None => panic!(
259 None => panic!(
248 "forgot to load the data or trying to access inline data"
260 "forgot to load the data or trying to access inline data"
249 ),
261 ),
250 }
262 }
251 }
263 }
252
264
253 /// Get an entry of the revlog.
265 /// Get an entry of the revlog.
254 pub fn get_entry(
266 pub fn get_entry(
255 &self,
267 &self,
256 rev: Revision,
268 rev: Revision,
257 ) -> Result<RevlogEntry, RevlogError> {
269 ) -> Result<RevlogEntry, RevlogError> {
258 let index_entry = self
270 let index_entry = self
259 .index
271 .index
260 .get_entry(rev)
272 .get_entry(rev)
261 .ok_or(RevlogError::InvalidRevision)?;
273 .ok_or(RevlogError::InvalidRevision)?;
262 let start = index_entry.offset();
274 let start = index_entry.offset();
263 let end = start + index_entry.compressed_len() as usize;
275 let end = start + index_entry.compressed_len() as usize;
264 let data = if self.index.is_inline() {
276 let data = if self.index.is_inline() {
265 self.index.data(start, end)
277 self.index.data(start, end)
266 } else {
278 } else {
267 &self.data()[start..end]
279 &self.data()[start..end]
268 };
280 };
269 let entry = RevlogEntry {
281 let entry = RevlogEntry {
270 revlog: self,
282 revlog: self,
271 rev,
283 rev,
272 bytes: data,
284 bytes: data,
273 compressed_len: index_entry.compressed_len(),
285 compressed_len: index_entry.compressed_len(),
274 uncompressed_len: index_entry.uncompressed_len(),
286 uncompressed_len: index_entry.uncompressed_len(),
275 base_rev_or_base_of_delta_chain: if index_entry
287 base_rev_or_base_of_delta_chain: if index_entry
276 .base_revision_or_base_of_delta_chain()
288 .base_revision_or_base_of_delta_chain()
277 == rev
289 == rev
278 {
290 {
279 None
291 None
280 } else {
292 } else {
281 Some(index_entry.base_revision_or_base_of_delta_chain())
293 Some(index_entry.base_revision_or_base_of_delta_chain())
282 },
294 },
283 p1: index_entry.p1(),
295 p1: index_entry.p1(),
284 p2: index_entry.p2(),
296 p2: index_entry.p2(),
297 flags: index_entry.flags(),
285 hash: *index_entry.hash(),
298 hash: *index_entry.hash(),
286 };
299 };
287 Ok(entry)
300 Ok(entry)
288 }
301 }
289
302
290 /// when resolving internal references within revlog, any errors
303 /// when resolving internal references within revlog, any errors
291 /// should be reported as corruption, instead of e.g. "invalid revision"
304 /// should be reported as corruption, instead of e.g. "invalid revision"
292 fn get_entry_internal(
305 fn get_entry_internal(
293 &self,
306 &self,
294 rev: Revision,
307 rev: Revision,
295 ) -> Result<RevlogEntry, HgError> {
308 ) -> Result<RevlogEntry, HgError> {
296 return self.get_entry(rev).map_err(|_| corrupted());
309 return self.get_entry(rev).map_err(|_| corrupted());
297 }
310 }
298 }
311 }
299
312
300 /// The revlog entry's bytes and the necessary informations to extract
313 /// The revlog entry's bytes and the necessary informations to extract
301 /// the entry's data.
314 /// the entry's data.
302 #[derive(Clone)]
315 #[derive(Clone)]
303 pub struct RevlogEntry<'a> {
316 pub struct RevlogEntry<'a> {
304 revlog: &'a Revlog,
317 revlog: &'a Revlog,
305 rev: Revision,
318 rev: Revision,
306 bytes: &'a [u8],
319 bytes: &'a [u8],
307 compressed_len: u32,
320 compressed_len: u32,
308 uncompressed_len: i32,
321 uncompressed_len: i32,
309 base_rev_or_base_of_delta_chain: Option<Revision>,
322 base_rev_or_base_of_delta_chain: Option<Revision>,
310 p1: Revision,
323 p1: Revision,
311 p2: Revision,
324 p2: Revision,
325 flags: u16,
312 hash: Node,
326 hash: Node,
313 }
327 }
314
328
315 impl<'a> RevlogEntry<'a> {
329 impl<'a> RevlogEntry<'a> {
316 pub fn revision(&self) -> Revision {
330 pub fn revision(&self) -> Revision {
317 self.rev
331 self.rev
318 }
332 }
319
333
320 pub fn uncompressed_len(&self) -> Option<u32> {
334 pub fn uncompressed_len(&self) -> Option<u32> {
321 u32::try_from(self.uncompressed_len).ok()
335 u32::try_from(self.uncompressed_len).ok()
322 }
336 }
323
337
338 pub fn has_p1(&self) -> bool {
339 self.p1 != NULL_REVISION
340 }
341
342 pub fn is_cencored(&self) -> bool {
343 (self.flags & REVISION_FLAG_CENSORED) != 0
344 }
345
346 pub fn has_length_affecting_flag_processor(&self) -> bool {
347 // Relevant Python code: revlog.size()
348 // note: ELLIPSIS is known to not change the content
349 (self.flags & (REVIDX_KNOWN_FLAGS ^ REVISION_FLAG_ELLIPSIS)) != 0
350 }
351
324 /// The data for this entry, after resolving deltas if any.
352 /// The data for this entry, after resolving deltas if any.
325 pub fn data(&self) -> Result<Cow<'a, [u8]>, HgError> {
353 pub fn data(&self) -> Result<Cow<'a, [u8]>, HgError> {
326 let mut entry = self.clone();
354 let mut entry = self.clone();
327 let mut delta_chain = vec![];
355 let mut delta_chain = vec![];
328
356
329 // The meaning of `base_rev_or_base_of_delta_chain` depends on
357 // The meaning of `base_rev_or_base_of_delta_chain` depends on
330 // generaldelta. See the doc on `ENTRY_DELTA_BASE` in
358 // generaldelta. See the doc on `ENTRY_DELTA_BASE` in
331 // `mercurial/revlogutils/constants.py` and the code in
359 // `mercurial/revlogutils/constants.py` and the code in
332 // [_chaininfo] and in [index_deltachain].
360 // [_chaininfo] and in [index_deltachain].
333 let uses_generaldelta = self.revlog.index.uses_generaldelta();
361 let uses_generaldelta = self.revlog.index.uses_generaldelta();
334 while let Some(base_rev) = entry.base_rev_or_base_of_delta_chain {
362 while let Some(base_rev) = entry.base_rev_or_base_of_delta_chain {
335 let base_rev = if uses_generaldelta {
363 let base_rev = if uses_generaldelta {
336 base_rev
364 base_rev
337 } else {
365 } else {
338 entry.rev - 1
366 entry.rev - 1
339 };
367 };
340 delta_chain.push(entry);
368 delta_chain.push(entry);
341 entry = self.revlog.get_entry_internal(base_rev)?;
369 entry = self.revlog.get_entry_internal(base_rev)?;
342 }
370 }
343
371
344 let data = if delta_chain.is_empty() {
372 let data = if delta_chain.is_empty() {
345 entry.data_chunk()?
373 entry.data_chunk()?
346 } else {
374 } else {
347 Revlog::build_data_from_deltas(entry, &delta_chain)?.into()
375 Revlog::build_data_from_deltas(entry, &delta_chain)?.into()
348 };
376 };
349
377
350 if self.revlog.check_hash(
378 if self.revlog.check_hash(
351 self.p1,
379 self.p1,
352 self.p2,
380 self.p2,
353 self.hash.as_bytes(),
381 self.hash.as_bytes(),
354 &data,
382 &data,
355 ) {
383 ) {
356 Ok(data)
384 Ok(data)
357 } else {
385 } else {
358 Err(corrupted())
386 Err(corrupted())
359 }
387 }
360 }
388 }
361
389
362 /// Extract the data contained in the entry.
390 /// Extract the data contained in the entry.
363 /// This may be a delta. (See `is_delta`.)
391 /// This may be a delta. (See `is_delta`.)
364 fn data_chunk(&self) -> Result<Cow<'a, [u8]>, HgError> {
392 fn data_chunk(&self) -> Result<Cow<'a, [u8]>, HgError> {
365 if self.bytes.is_empty() {
393 if self.bytes.is_empty() {
366 return Ok(Cow::Borrowed(&[]));
394 return Ok(Cow::Borrowed(&[]));
367 }
395 }
368 match self.bytes[0] {
396 match self.bytes[0] {
369 // Revision data is the entirety of the entry, including this
397 // Revision data is the entirety of the entry, including this
370 // header.
398 // header.
371 b'\0' => Ok(Cow::Borrowed(self.bytes)),
399 b'\0' => Ok(Cow::Borrowed(self.bytes)),
372 // Raw revision data follows.
400 // Raw revision data follows.
373 b'u' => Ok(Cow::Borrowed(&self.bytes[1..])),
401 b'u' => Ok(Cow::Borrowed(&self.bytes[1..])),
374 // zlib (RFC 1950) data.
402 // zlib (RFC 1950) data.
375 b'x' => Ok(Cow::Owned(self.uncompressed_zlib_data()?)),
403 b'x' => Ok(Cow::Owned(self.uncompressed_zlib_data()?)),
376 // zstd data.
404 // zstd data.
377 b'\x28' => Ok(Cow::Owned(self.uncompressed_zstd_data()?)),
405 b'\x28' => Ok(Cow::Owned(self.uncompressed_zstd_data()?)),
378 // A proper new format should have had a repo/store requirement.
406 // A proper new format should have had a repo/store requirement.
379 _format_type => Err(corrupted()),
407 _format_type => Err(corrupted()),
380 }
408 }
381 }
409 }
382
410
383 fn uncompressed_zlib_data(&self) -> Result<Vec<u8>, HgError> {
411 fn uncompressed_zlib_data(&self) -> Result<Vec<u8>, HgError> {
384 let mut decoder = ZlibDecoder::new(self.bytes);
412 let mut decoder = ZlibDecoder::new(self.bytes);
385 if self.is_delta() {
413 if self.is_delta() {
386 let mut buf = Vec::with_capacity(self.compressed_len as usize);
414 let mut buf = Vec::with_capacity(self.compressed_len as usize);
387 decoder.read_to_end(&mut buf).map_err(|_| corrupted())?;
415 decoder.read_to_end(&mut buf).map_err(|_| corrupted())?;
388 Ok(buf)
416 Ok(buf)
389 } else {
417 } else {
390 let cap = self.uncompressed_len.max(0) as usize;
418 let cap = self.uncompressed_len.max(0) as usize;
391 let mut buf = vec![0; cap];
419 let mut buf = vec![0; cap];
392 decoder.read_exact(&mut buf).map_err(|_| corrupted())?;
420 decoder.read_exact(&mut buf).map_err(|_| corrupted())?;
393 Ok(buf)
421 Ok(buf)
394 }
422 }
395 }
423 }
396
424
397 fn uncompressed_zstd_data(&self) -> Result<Vec<u8>, HgError> {
425 fn uncompressed_zstd_data(&self) -> Result<Vec<u8>, HgError> {
398 if self.is_delta() {
426 if self.is_delta() {
399 let mut buf = Vec::with_capacity(self.compressed_len as usize);
427 let mut buf = Vec::with_capacity(self.compressed_len as usize);
400 zstd::stream::copy_decode(self.bytes, &mut buf)
428 zstd::stream::copy_decode(self.bytes, &mut buf)
401 .map_err(|_| corrupted())?;
429 .map_err(|_| corrupted())?;
402 Ok(buf)
430 Ok(buf)
403 } else {
431 } else {
404 let cap = self.uncompressed_len.max(0) as usize;
432 let cap = self.uncompressed_len.max(0) as usize;
405 let mut buf = vec![0; cap];
433 let mut buf = vec![0; cap];
406 let len = zstd::block::decompress_to_buffer(self.bytes, &mut buf)
434 let len = zstd::block::decompress_to_buffer(self.bytes, &mut buf)
407 .map_err(|_| corrupted())?;
435 .map_err(|_| corrupted())?;
408 if len != self.uncompressed_len as usize {
436 if len != self.uncompressed_len as usize {
409 Err(corrupted())
437 Err(corrupted())
410 } else {
438 } else {
411 Ok(buf)
439 Ok(buf)
412 }
440 }
413 }
441 }
414 }
442 }
415
443
416 /// Tell if the entry is a snapshot or a delta
444 /// Tell if the entry is a snapshot or a delta
417 /// (influences on decompression).
445 /// (influences on decompression).
418 fn is_delta(&self) -> bool {
446 fn is_delta(&self) -> bool {
419 self.base_rev_or_base_of_delta_chain.is_some()
447 self.base_rev_or_base_of_delta_chain.is_some()
420 }
448 }
421 }
449 }
422
450
423 /// Calculate the hash of a revision given its data and its parents.
451 /// Calculate the hash of a revision given its data and its parents.
424 fn hash(
452 fn hash(
425 data: &[u8],
453 data: &[u8],
426 p1_hash: &[u8],
454 p1_hash: &[u8],
427 p2_hash: &[u8],
455 p2_hash: &[u8],
428 ) -> [u8; NODE_BYTES_LENGTH] {
456 ) -> [u8; NODE_BYTES_LENGTH] {
429 let mut hasher = Sha1::new();
457 let mut hasher = Sha1::new();
430 let (a, b) = (p1_hash, p2_hash);
458 let (a, b) = (p1_hash, p2_hash);
431 if a > b {
459 if a > b {
432 hasher.update(b);
460 hasher.update(b);
433 hasher.update(a);
461 hasher.update(a);
434 } else {
462 } else {
435 hasher.update(a);
463 hasher.update(a);
436 hasher.update(b);
464 hasher.update(b);
437 }
465 }
438 hasher.update(data);
466 hasher.update(data);
439 *hasher.finalize().as_ref()
467 *hasher.finalize().as_ref()
440 }
468 }
@@ -1,539 +1,539 b''
1 // status.rs
1 // status.rs
2 //
2 //
3 // Copyright 2020, Georges Racinet <georges.racinets@octobus.net>
3 // Copyright 2020, Georges Racinet <georges.racinets@octobus.net>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 use crate::error::CommandError;
8 use crate::error::CommandError;
9 use crate::ui::Ui;
9 use crate::ui::Ui;
10 use crate::utils::path_utils::RelativizePaths;
10 use crate::utils::path_utils::RelativizePaths;
11 use clap::{Arg, SubCommand};
11 use clap::{Arg, SubCommand};
12 use format_bytes::format_bytes;
12 use format_bytes::format_bytes;
13 use hg;
13 use hg;
14 use hg::config::Config;
14 use hg::config::Config;
15 use hg::dirstate::has_exec_bit;
15 use hg::dirstate::has_exec_bit;
16 use hg::dirstate::status::StatusPath;
16 use hg::dirstate::status::StatusPath;
17 use hg::dirstate::TruncatedTimestamp;
17 use hg::dirstate::TruncatedTimestamp;
18 use hg::dirstate::RANGE_MASK_31BIT;
18 use hg::dirstate::RANGE_MASK_31BIT;
19 use hg::errors::{HgError, IoResultExt};
19 use hg::errors::{HgError, IoResultExt};
20 use hg::lock::LockError;
20 use hg::lock::LockError;
21 use hg::manifest::Manifest;
21 use hg::manifest::Manifest;
22 use hg::matchers::AlwaysMatcher;
22 use hg::matchers::AlwaysMatcher;
23 use hg::repo::Repo;
23 use hg::repo::Repo;
24 use hg::utils::files::get_bytes_from_os_string;
24 use hg::utils::files::get_bytes_from_os_string;
25 use hg::utils::files::get_bytes_from_path;
25 use hg::utils::files::get_bytes_from_path;
26 use hg::utils::files::get_path_from_bytes;
26 use hg::utils::files::get_path_from_bytes;
27 use hg::utils::hg_path::{hg_path_to_path_buf, HgPath};
27 use hg::utils::hg_path::{hg_path_to_path_buf, HgPath};
28 use hg::StatusOptions;
28 use hg::StatusOptions;
29 use log::info;
29 use log::info;
30 use std::io;
30 use std::io;
31 use std::path::PathBuf;
31 use std::path::PathBuf;
32
32
33 pub const HELP_TEXT: &str = "
33 pub const HELP_TEXT: &str = "
34 Show changed files in the working directory
34 Show changed files in the working directory
35
35
36 This is a pure Rust version of `hg status`.
36 This is a pure Rust version of `hg status`.
37
37
38 Some options might be missing, check the list below.
38 Some options might be missing, check the list below.
39 ";
39 ";
40
40
41 pub fn args() -> clap::App<'static, 'static> {
41 pub fn args() -> clap::App<'static, 'static> {
42 SubCommand::with_name("status")
42 SubCommand::with_name("status")
43 .alias("st")
43 .alias("st")
44 .about(HELP_TEXT)
44 .about(HELP_TEXT)
45 .arg(
45 .arg(
46 Arg::with_name("all")
46 Arg::with_name("all")
47 .help("show status of all files")
47 .help("show status of all files")
48 .short("-A")
48 .short("-A")
49 .long("--all"),
49 .long("--all"),
50 )
50 )
51 .arg(
51 .arg(
52 Arg::with_name("modified")
52 Arg::with_name("modified")
53 .help("show only modified files")
53 .help("show only modified files")
54 .short("-m")
54 .short("-m")
55 .long("--modified"),
55 .long("--modified"),
56 )
56 )
57 .arg(
57 .arg(
58 Arg::with_name("added")
58 Arg::with_name("added")
59 .help("show only added files")
59 .help("show only added files")
60 .short("-a")
60 .short("-a")
61 .long("--added"),
61 .long("--added"),
62 )
62 )
63 .arg(
63 .arg(
64 Arg::with_name("removed")
64 Arg::with_name("removed")
65 .help("show only removed files")
65 .help("show only removed files")
66 .short("-r")
66 .short("-r")
67 .long("--removed"),
67 .long("--removed"),
68 )
68 )
69 .arg(
69 .arg(
70 Arg::with_name("clean")
70 Arg::with_name("clean")
71 .help("show only clean files")
71 .help("show only clean files")
72 .short("-c")
72 .short("-c")
73 .long("--clean"),
73 .long("--clean"),
74 )
74 )
75 .arg(
75 .arg(
76 Arg::with_name("deleted")
76 Arg::with_name("deleted")
77 .help("show only deleted files")
77 .help("show only deleted files")
78 .short("-d")
78 .short("-d")
79 .long("--deleted"),
79 .long("--deleted"),
80 )
80 )
81 .arg(
81 .arg(
82 Arg::with_name("unknown")
82 Arg::with_name("unknown")
83 .help("show only unknown (not tracked) files")
83 .help("show only unknown (not tracked) files")
84 .short("-u")
84 .short("-u")
85 .long("--unknown"),
85 .long("--unknown"),
86 )
86 )
87 .arg(
87 .arg(
88 Arg::with_name("ignored")
88 Arg::with_name("ignored")
89 .help("show only ignored files")
89 .help("show only ignored files")
90 .short("-i")
90 .short("-i")
91 .long("--ignored"),
91 .long("--ignored"),
92 )
92 )
93 .arg(
93 .arg(
94 Arg::with_name("copies")
94 Arg::with_name("copies")
95 .help("show source of copied files (DEFAULT: ui.statuscopies)")
95 .help("show source of copied files (DEFAULT: ui.statuscopies)")
96 .short("-C")
96 .short("-C")
97 .long("--copies"),
97 .long("--copies"),
98 )
98 )
99 .arg(
99 .arg(
100 Arg::with_name("no-status")
100 Arg::with_name("no-status")
101 .help("hide status prefix")
101 .help("hide status prefix")
102 .short("-n")
102 .short("-n")
103 .long("--no-status"),
103 .long("--no-status"),
104 )
104 )
105 }
105 }
106
106
107 /// Pure data type allowing the caller to specify file states to display
107 /// Pure data type allowing the caller to specify file states to display
108 #[derive(Copy, Clone, Debug)]
108 #[derive(Copy, Clone, Debug)]
109 pub struct DisplayStates {
109 pub struct DisplayStates {
110 pub modified: bool,
110 pub modified: bool,
111 pub added: bool,
111 pub added: bool,
112 pub removed: bool,
112 pub removed: bool,
113 pub clean: bool,
113 pub clean: bool,
114 pub deleted: bool,
114 pub deleted: bool,
115 pub unknown: bool,
115 pub unknown: bool,
116 pub ignored: bool,
116 pub ignored: bool,
117 }
117 }
118
118
119 pub const DEFAULT_DISPLAY_STATES: DisplayStates = DisplayStates {
119 pub const DEFAULT_DISPLAY_STATES: DisplayStates = DisplayStates {
120 modified: true,
120 modified: true,
121 added: true,
121 added: true,
122 removed: true,
122 removed: true,
123 clean: false,
123 clean: false,
124 deleted: true,
124 deleted: true,
125 unknown: true,
125 unknown: true,
126 ignored: false,
126 ignored: false,
127 };
127 };
128
128
129 pub const ALL_DISPLAY_STATES: DisplayStates = DisplayStates {
129 pub const ALL_DISPLAY_STATES: DisplayStates = DisplayStates {
130 modified: true,
130 modified: true,
131 added: true,
131 added: true,
132 removed: true,
132 removed: true,
133 clean: true,
133 clean: true,
134 deleted: true,
134 deleted: true,
135 unknown: true,
135 unknown: true,
136 ignored: true,
136 ignored: true,
137 };
137 };
138
138
139 impl DisplayStates {
139 impl DisplayStates {
140 pub fn is_empty(&self) -> bool {
140 pub fn is_empty(&self) -> bool {
141 !(self.modified
141 !(self.modified
142 || self.added
142 || self.added
143 || self.removed
143 || self.removed
144 || self.clean
144 || self.clean
145 || self.deleted
145 || self.deleted
146 || self.unknown
146 || self.unknown
147 || self.ignored)
147 || self.ignored)
148 }
148 }
149 }
149 }
150
150
151 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
151 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
152 let status_enabled_default = false;
152 let status_enabled_default = false;
153 let status_enabled = invocation.config.get_option(b"rhg", b"status")?;
153 let status_enabled = invocation.config.get_option(b"rhg", b"status")?;
154 if !status_enabled.unwrap_or(status_enabled_default) {
154 if !status_enabled.unwrap_or(status_enabled_default) {
155 return Err(CommandError::unsupported(
155 return Err(CommandError::unsupported(
156 "status is experimental in rhg (enable it with 'rhg.status = true' \
156 "status is experimental in rhg (enable it with 'rhg.status = true' \
157 or enable fallback with 'rhg.on-unsupported = fallback')"
157 or enable fallback with 'rhg.on-unsupported = fallback')"
158 ));
158 ));
159 }
159 }
160
160
161 // TODO: lift these limitations
161 // TODO: lift these limitations
162 if invocation.config.get_bool(b"ui", b"tweakdefaults")? {
162 if invocation.config.get_bool(b"ui", b"tweakdefaults")? {
163 return Err(CommandError::unsupported(
163 return Err(CommandError::unsupported(
164 "ui.tweakdefaults is not yet supported with rhg status",
164 "ui.tweakdefaults is not yet supported with rhg status",
165 ));
165 ));
166 }
166 }
167 if invocation.config.get_bool(b"ui", b"statuscopies")? {
167 if invocation.config.get_bool(b"ui", b"statuscopies")? {
168 return Err(CommandError::unsupported(
168 return Err(CommandError::unsupported(
169 "ui.statuscopies is not yet supported with rhg status",
169 "ui.statuscopies is not yet supported with rhg status",
170 ));
170 ));
171 }
171 }
172 if invocation
172 if invocation
173 .config
173 .config
174 .get(b"commands", b"status.terse")
174 .get(b"commands", b"status.terse")
175 .is_some()
175 .is_some()
176 {
176 {
177 return Err(CommandError::unsupported(
177 return Err(CommandError::unsupported(
178 "status.terse is not yet supported with rhg status",
178 "status.terse is not yet supported with rhg status",
179 ));
179 ));
180 }
180 }
181
181
182 let ui = invocation.ui;
182 let ui = invocation.ui;
183 let config = invocation.config;
183 let config = invocation.config;
184 let args = invocation.subcommand_args;
184 let args = invocation.subcommand_args;
185
185
186 let verbose = !ui.plain()
186 let verbose = !ui.plain()
187 && !args.is_present("print0")
187 && !args.is_present("print0")
188 && (config.get_bool(b"ui", b"verbose")?
188 && (config.get_bool(b"ui", b"verbose")?
189 || config.get_bool(b"commands", b"status.verbose")?);
189 || config.get_bool(b"commands", b"status.verbose")?);
190 if verbose {
190 if verbose {
191 return Err(CommandError::unsupported(
191 return Err(CommandError::unsupported(
192 "verbose status is not supported yet",
192 "verbose status is not supported yet",
193 ));
193 ));
194 }
194 }
195
195
196 let all = args.is_present("all");
196 let all = args.is_present("all");
197 let display_states = if all {
197 let display_states = if all {
198 // TODO when implementing `--quiet`: it excludes clean files
198 // TODO when implementing `--quiet`: it excludes clean files
199 // from `--all`
199 // from `--all`
200 ALL_DISPLAY_STATES
200 ALL_DISPLAY_STATES
201 } else {
201 } else {
202 let requested = DisplayStates {
202 let requested = DisplayStates {
203 modified: args.is_present("modified"),
203 modified: args.is_present("modified"),
204 added: args.is_present("added"),
204 added: args.is_present("added"),
205 removed: args.is_present("removed"),
205 removed: args.is_present("removed"),
206 clean: args.is_present("clean"),
206 clean: args.is_present("clean"),
207 deleted: args.is_present("deleted"),
207 deleted: args.is_present("deleted"),
208 unknown: args.is_present("unknown"),
208 unknown: args.is_present("unknown"),
209 ignored: args.is_present("ignored"),
209 ignored: args.is_present("ignored"),
210 };
210 };
211 if requested.is_empty() {
211 if requested.is_empty() {
212 DEFAULT_DISPLAY_STATES
212 DEFAULT_DISPLAY_STATES
213 } else {
213 } else {
214 requested
214 requested
215 }
215 }
216 };
216 };
217 let no_status = args.is_present("no-status");
217 let no_status = args.is_present("no-status");
218 let list_copies = all
218 let list_copies = all
219 || args.is_present("copies")
219 || args.is_present("copies")
220 || config.get_bool(b"ui", b"statuscopies")?;
220 || config.get_bool(b"ui", b"statuscopies")?;
221
221
222 let repo = invocation.repo?;
222 let repo = invocation.repo?;
223
223
224 if repo.has_sparse() || repo.has_narrow() {
224 if repo.has_sparse() || repo.has_narrow() {
225 return Err(CommandError::unsupported(
225 return Err(CommandError::unsupported(
226 "rhg status is not supported for sparse checkouts or narrow clones yet"
226 "rhg status is not supported for sparse checkouts or narrow clones yet"
227 ));
227 ));
228 }
228 }
229
229
230 let mut dmap = repo.dirstate_map_mut()?;
230 let mut dmap = repo.dirstate_map_mut()?;
231
231
232 let options = StatusOptions {
232 let options = StatusOptions {
233 // we're currently supporting file systems with exec flags only
233 // we're currently supporting file systems with exec flags only
234 // anyway
234 // anyway
235 check_exec: true,
235 check_exec: true,
236 list_clean: display_states.clean,
236 list_clean: display_states.clean,
237 list_unknown: display_states.unknown,
237 list_unknown: display_states.unknown,
238 list_ignored: display_states.ignored,
238 list_ignored: display_states.ignored,
239 list_copies,
239 list_copies,
240 collect_traversed_dirs: false,
240 collect_traversed_dirs: false,
241 };
241 };
242 let (mut ds_status, pattern_warnings) = dmap.status(
242 let (mut ds_status, pattern_warnings) = dmap.status(
243 &AlwaysMatcher,
243 &AlwaysMatcher,
244 repo.working_directory_path().to_owned(),
244 repo.working_directory_path().to_owned(),
245 ignore_files(repo, config),
245 ignore_files(repo, config),
246 options,
246 options,
247 )?;
247 )?;
248 for warning in pattern_warnings {
248 for warning in pattern_warnings {
249 match warning {
249 match warning {
250 hg::PatternFileWarning::InvalidSyntax(path, syntax) => ui
250 hg::PatternFileWarning::InvalidSyntax(path, syntax) => ui
251 .write_stderr(&format_bytes!(
251 .write_stderr(&format_bytes!(
252 b"{}: ignoring invalid syntax '{}'\n",
252 b"{}: ignoring invalid syntax '{}'\n",
253 get_bytes_from_path(path),
253 get_bytes_from_path(path),
254 &*syntax
254 &*syntax
255 ))?,
255 ))?,
256 hg::PatternFileWarning::NoSuchFile(path) => {
256 hg::PatternFileWarning::NoSuchFile(path) => {
257 let path = if let Ok(relative) =
257 let path = if let Ok(relative) =
258 path.strip_prefix(repo.working_directory_path())
258 path.strip_prefix(repo.working_directory_path())
259 {
259 {
260 relative
260 relative
261 } else {
261 } else {
262 &*path
262 &*path
263 };
263 };
264 ui.write_stderr(&format_bytes!(
264 ui.write_stderr(&format_bytes!(
265 b"skipping unreadable pattern file '{}': \
265 b"skipping unreadable pattern file '{}': \
266 No such file or directory\n",
266 No such file or directory\n",
267 get_bytes_from_path(path),
267 get_bytes_from_path(path),
268 ))?
268 ))?
269 }
269 }
270 }
270 }
271 }
271 }
272
272
273 for (path, error) in ds_status.bad {
273 for (path, error) in ds_status.bad {
274 let error = match error {
274 let error = match error {
275 hg::BadMatch::OsError(code) => {
275 hg::BadMatch::OsError(code) => {
276 std::io::Error::from_raw_os_error(code).to_string()
276 std::io::Error::from_raw_os_error(code).to_string()
277 }
277 }
278 hg::BadMatch::BadType(ty) => {
278 hg::BadMatch::BadType(ty) => {
279 format!("unsupported file type (type is {})", ty)
279 format!("unsupported file type (type is {})", ty)
280 }
280 }
281 };
281 };
282 ui.write_stderr(&format_bytes!(
282 ui.write_stderr(&format_bytes!(
283 b"{}: {}\n",
283 b"{}: {}\n",
284 path.as_bytes(),
284 path.as_bytes(),
285 error.as_bytes()
285 error.as_bytes()
286 ))?
286 ))?
287 }
287 }
288 if !ds_status.unsure.is_empty() {
288 if !ds_status.unsure.is_empty() {
289 info!(
289 info!(
290 "Files to be rechecked by retrieval from filelog: {:?}",
290 "Files to be rechecked by retrieval from filelog: {:?}",
291 ds_status.unsure.iter().map(|s| &s.path).collect::<Vec<_>>()
291 ds_status.unsure.iter().map(|s| &s.path).collect::<Vec<_>>()
292 );
292 );
293 }
293 }
294 let mut fixup = Vec::new();
294 let mut fixup = Vec::new();
295 if !ds_status.unsure.is_empty()
295 if !ds_status.unsure.is_empty()
296 && (display_states.modified || display_states.clean)
296 && (display_states.modified || display_states.clean)
297 {
297 {
298 let p1 = repo.dirstate_parents()?.p1;
298 let p1 = repo.dirstate_parents()?.p1;
299 let manifest = repo.manifest_for_node(p1).map_err(|e| {
299 let manifest = repo.manifest_for_node(p1).map_err(|e| {
300 CommandError::from((e, &*format!("{:x}", p1.short())))
300 CommandError::from((e, &*format!("{:x}", p1.short())))
301 })?;
301 })?;
302 for to_check in ds_status.unsure {
302 for to_check in ds_status.unsure {
303 if unsure_is_modified(repo, &manifest, &to_check.path)? {
303 if unsure_is_modified(repo, &manifest, &to_check.path)? {
304 if display_states.modified {
304 if display_states.modified {
305 ds_status.modified.push(to_check);
305 ds_status.modified.push(to_check);
306 }
306 }
307 } else {
307 } else {
308 if display_states.clean {
308 if display_states.clean {
309 ds_status.clean.push(to_check.clone());
309 ds_status.clean.push(to_check.clone());
310 }
310 }
311 fixup.push(to_check.path.into_owned())
311 fixup.push(to_check.path.into_owned())
312 }
312 }
313 }
313 }
314 }
314 }
315 let relative_paths = (!ui.plain())
315 let relative_paths = (!ui.plain())
316 && config
316 && config
317 .get_option(b"commands", b"status.relative")?
317 .get_option(b"commands", b"status.relative")?
318 .unwrap_or(config.get_bool(b"ui", b"relative-paths")?);
318 .unwrap_or(config.get_bool(b"ui", b"relative-paths")?);
319 let output = DisplayStatusPaths {
319 let output = DisplayStatusPaths {
320 ui,
320 ui,
321 no_status,
321 no_status,
322 relativize: if relative_paths {
322 relativize: if relative_paths {
323 Some(RelativizePaths::new(repo)?)
323 Some(RelativizePaths::new(repo)?)
324 } else {
324 } else {
325 None
325 None
326 },
326 },
327 };
327 };
328 if display_states.modified {
328 if display_states.modified {
329 output.display(b"M", ds_status.modified)?;
329 output.display(b"M", ds_status.modified)?;
330 }
330 }
331 if display_states.added {
331 if display_states.added {
332 output.display(b"A", ds_status.added)?;
332 output.display(b"A", ds_status.added)?;
333 }
333 }
334 if display_states.removed {
334 if display_states.removed {
335 output.display(b"R", ds_status.removed)?;
335 output.display(b"R", ds_status.removed)?;
336 }
336 }
337 if display_states.deleted {
337 if display_states.deleted {
338 output.display(b"!", ds_status.deleted)?;
338 output.display(b"!", ds_status.deleted)?;
339 }
339 }
340 if display_states.unknown {
340 if display_states.unknown {
341 output.display(b"?", ds_status.unknown)?;
341 output.display(b"?", ds_status.unknown)?;
342 }
342 }
343 if display_states.ignored {
343 if display_states.ignored {
344 output.display(b"I", ds_status.ignored)?;
344 output.display(b"I", ds_status.ignored)?;
345 }
345 }
346 if display_states.clean {
346 if display_states.clean {
347 output.display(b"C", ds_status.clean)?;
347 output.display(b"C", ds_status.clean)?;
348 }
348 }
349
349
350 let mut dirstate_write_needed = ds_status.dirty;
350 let mut dirstate_write_needed = ds_status.dirty;
351 let filesystem_time_at_status_start =
351 let filesystem_time_at_status_start =
352 ds_status.filesystem_time_at_status_start;
352 ds_status.filesystem_time_at_status_start;
353
353
354 if (fixup.is_empty() || filesystem_time_at_status_start.is_none())
354 if (fixup.is_empty() || filesystem_time_at_status_start.is_none())
355 && !dirstate_write_needed
355 && !dirstate_write_needed
356 {
356 {
357 // Nothing to update
357 // Nothing to update
358 return Ok(());
358 return Ok(());
359 }
359 }
360
360
361 // Update the dirstate on disk if we can
361 // Update the dirstate on disk if we can
362 let with_lock_result =
362 let with_lock_result =
363 repo.try_with_wlock_no_wait(|| -> Result<(), CommandError> {
363 repo.try_with_wlock_no_wait(|| -> Result<(), CommandError> {
364 if let Some(mtime_boundary) = filesystem_time_at_status_start {
364 if let Some(mtime_boundary) = filesystem_time_at_status_start {
365 for hg_path in fixup {
365 for hg_path in fixup {
366 use std::os::unix::fs::MetadataExt;
366 use std::os::unix::fs::MetadataExt;
367 let fs_path = hg_path_to_path_buf(&hg_path)
367 let fs_path = hg_path_to_path_buf(&hg_path)
368 .expect("HgPath conversion");
368 .expect("HgPath conversion");
369 // Specifically do not reuse `fs_metadata` from
369 // Specifically do not reuse `fs_metadata` from
370 // `unsure_is_clean` which was needed before reading
370 // `unsure_is_clean` which was needed before reading
371 // contents. Here we access metadata again after reading
371 // contents. Here we access metadata again after reading
372 // content, in case it changed in the meantime.
372 // content, in case it changed in the meantime.
373 let fs_metadata = repo
373 let fs_metadata = repo
374 .working_directory_vfs()
374 .working_directory_vfs()
375 .symlink_metadata(&fs_path)?;
375 .symlink_metadata(&fs_path)?;
376 if let Some(mtime) =
376 if let Some(mtime) =
377 TruncatedTimestamp::for_reliable_mtime_of(
377 TruncatedTimestamp::for_reliable_mtime_of(
378 &fs_metadata,
378 &fs_metadata,
379 &mtime_boundary,
379 &mtime_boundary,
380 )
380 )
381 .when_reading_file(&fs_path)?
381 .when_reading_file(&fs_path)?
382 {
382 {
383 let mode = fs_metadata.mode();
383 let mode = fs_metadata.mode();
384 let size = fs_metadata.len() as u32 & RANGE_MASK_31BIT;
384 let size = fs_metadata.len() as u32 & RANGE_MASK_31BIT;
385 let mut entry = dmap
385 let mut entry = dmap
386 .get(&hg_path)?
386 .get(&hg_path)?
387 .expect("ambiguous file not in dirstate");
387 .expect("ambiguous file not in dirstate");
388 entry.set_clean(mode, size, mtime);
388 entry.set_clean(mode, size, mtime);
389 dmap.add_file(&hg_path, entry)?;
389 dmap.add_file(&hg_path, entry)?;
390 dirstate_write_needed = true
390 dirstate_write_needed = true
391 }
391 }
392 }
392 }
393 }
393 }
394 drop(dmap); // Avoid "already mutably borrowed" RefCell panics
394 drop(dmap); // Avoid "already mutably borrowed" RefCell panics
395 if dirstate_write_needed {
395 if dirstate_write_needed {
396 repo.write_dirstate()?
396 repo.write_dirstate()?
397 }
397 }
398 Ok(())
398 Ok(())
399 });
399 });
400 match with_lock_result {
400 match with_lock_result {
401 Ok(closure_result) => closure_result?,
401 Ok(closure_result) => closure_result?,
402 Err(LockError::AlreadyHeld) => {
402 Err(LockError::AlreadyHeld) => {
403 // Not updating the dirstate is not ideal but not critical:
403 // Not updating the dirstate is not ideal but not critical:
404 // don’t keep our caller waiting until some other Mercurial
404 // don’t keep our caller waiting until some other Mercurial
405 // process releases the lock.
405 // process releases the lock.
406 }
406 }
407 Err(LockError::Other(HgError::IoError { error, .. }))
407 Err(LockError::Other(HgError::IoError { error, .. }))
408 if error.kind() == io::ErrorKind::PermissionDenied =>
408 if error.kind() == io::ErrorKind::PermissionDenied =>
409 {
409 {
410 // `hg status` on a read-only repository is fine
410 // `hg status` on a read-only repository is fine
411 }
411 }
412 Err(LockError::Other(error)) => {
412 Err(LockError::Other(error)) => {
413 // Report other I/O errors
413 // Report other I/O errors
414 Err(error)?
414 Err(error)?
415 }
415 }
416 }
416 }
417 Ok(())
417 Ok(())
418 }
418 }
419
419
420 fn ignore_files(repo: &Repo, config: &Config) -> Vec<PathBuf> {
420 fn ignore_files(repo: &Repo, config: &Config) -> Vec<PathBuf> {
421 let mut ignore_files = Vec::new();
421 let mut ignore_files = Vec::new();
422 let repo_ignore = repo.working_directory_vfs().join(".hgignore");
422 let repo_ignore = repo.working_directory_vfs().join(".hgignore");
423 if repo_ignore.exists() {
423 if repo_ignore.exists() {
424 ignore_files.push(repo_ignore)
424 ignore_files.push(repo_ignore)
425 }
425 }
426 for (key, value) in config.iter_section(b"ui") {
426 for (key, value) in config.iter_section(b"ui") {
427 if key == b"ignore" || key.starts_with(b"ignore.") {
427 if key == b"ignore" || key.starts_with(b"ignore.") {
428 let path = get_path_from_bytes(value);
428 let path = get_path_from_bytes(value);
429 // TODO: expand "~/" and environment variables here, like Python
429 // TODO: expand "~/" and environment variables here, like Python
430 // does with `os.path.expanduser` and `os.path.expandvars`
430 // does with `os.path.expanduser` and `os.path.expandvars`
431
431
432 let joined = repo.working_directory_path().join(path);
432 let joined = repo.working_directory_path().join(path);
433 ignore_files.push(joined);
433 ignore_files.push(joined);
434 }
434 }
435 }
435 }
436 ignore_files
436 ignore_files
437 }
437 }
438
438
439 struct DisplayStatusPaths<'a> {
439 struct DisplayStatusPaths<'a> {
440 ui: &'a Ui,
440 ui: &'a Ui,
441 no_status: bool,
441 no_status: bool,
442 relativize: Option<RelativizePaths>,
442 relativize: Option<RelativizePaths>,
443 }
443 }
444
444
445 impl DisplayStatusPaths<'_> {
445 impl DisplayStatusPaths<'_> {
446 // Probably more elegant to use a Deref or Borrow trait rather than
446 // Probably more elegant to use a Deref or Borrow trait rather than
447 // hardcode HgPathBuf, but probably not really useful at this point
447 // hardcode HgPathBuf, but probably not really useful at this point
448 fn display(
448 fn display(
449 &self,
449 &self,
450 status_prefix: &[u8],
450 status_prefix: &[u8],
451 mut paths: Vec<StatusPath<'_>>,
451 mut paths: Vec<StatusPath<'_>>,
452 ) -> Result<(), CommandError> {
452 ) -> Result<(), CommandError> {
453 paths.sort_unstable();
453 paths.sort_unstable();
454 for StatusPath { path, copy_source } in paths {
454 for StatusPath { path, copy_source } in paths {
455 let relative;
455 let relative;
456 let path = if let Some(relativize) = &self.relativize {
456 let path = if let Some(relativize) = &self.relativize {
457 relative = relativize.relativize(&path);
457 relative = relativize.relativize(&path);
458 &*relative
458 &*relative
459 } else {
459 } else {
460 path.as_bytes()
460 path.as_bytes()
461 };
461 };
462 // TODO optim, probably lots of unneeded copies here, especially
462 // TODO optim, probably lots of unneeded copies here, especially
463 // if out stream is buffered
463 // if out stream is buffered
464 if self.no_status {
464 if self.no_status {
465 self.ui.write_stdout(&format_bytes!(b"{}\n", path))?
465 self.ui.write_stdout(&format_bytes!(b"{}\n", path))?
466 } else {
466 } else {
467 self.ui.write_stdout(&format_bytes!(
467 self.ui.write_stdout(&format_bytes!(
468 b"{} {}\n",
468 b"{} {}\n",
469 status_prefix,
469 status_prefix,
470 path
470 path
471 ))?
471 ))?
472 }
472 }
473 if let Some(source) = copy_source {
473 if let Some(source) = copy_source {
474 self.ui.write_stdout(&format_bytes!(
474 self.ui.write_stdout(&format_bytes!(
475 b" {}\n",
475 b" {}\n",
476 source.as_bytes()
476 source.as_bytes()
477 ))?
477 ))?
478 }
478 }
479 }
479 }
480 Ok(())
480 Ok(())
481 }
481 }
482 }
482 }
483
483
484 /// Check if a file is modified by comparing actual repo store and file system.
484 /// Check if a file is modified by comparing actual repo store and file system.
485 ///
485 ///
486 /// This is meant to be used for files that the dirstate cannot resolve, due
486 /// This is meant to be used for files that the dirstate cannot resolve, due
487 /// to time resolution limits.
487 /// to time resolution limits.
488 fn unsure_is_modified(
488 fn unsure_is_modified(
489 repo: &Repo,
489 repo: &Repo,
490 manifest: &Manifest,
490 manifest: &Manifest,
491 hg_path: &HgPath,
491 hg_path: &HgPath,
492 ) -> Result<bool, HgError> {
492 ) -> Result<bool, HgError> {
493 let vfs = repo.working_directory_vfs();
493 let vfs = repo.working_directory_vfs();
494 let fs_path = hg_path_to_path_buf(hg_path).expect("HgPath conversion");
494 let fs_path = hg_path_to_path_buf(hg_path).expect("HgPath conversion");
495 let fs_metadata = vfs.symlink_metadata(&fs_path)?;
495 let fs_metadata = vfs.symlink_metadata(&fs_path)?;
496 let is_symlink = fs_metadata.file_type().is_symlink();
496 let is_symlink = fs_metadata.file_type().is_symlink();
497 // TODO: Also account for `FALLBACK_SYMLINK` and `FALLBACK_EXEC` from the
497 // TODO: Also account for `FALLBACK_SYMLINK` and `FALLBACK_EXEC` from the
498 // dirstate
498 // dirstate
499 let fs_flags = if is_symlink {
499 let fs_flags = if is_symlink {
500 Some(b'l')
500 Some(b'l')
501 } else if has_exec_bit(&fs_metadata) {
501 } else if has_exec_bit(&fs_metadata) {
502 Some(b'x')
502 Some(b'x')
503 } else {
503 } else {
504 None
504 None
505 };
505 };
506
506
507 let entry = manifest
507 let entry = manifest
508 .find_by_path(hg_path)?
508 .find_by_path(hg_path)?
509 .expect("ambgious file not in p1");
509 .expect("ambgious file not in p1");
510 if entry.flags != fs_flags {
510 if entry.flags != fs_flags {
511 return Ok(true);
511 return Ok(true);
512 }
512 }
513 let filelog = repo.filelog(hg_path)?;
513 let filelog = repo.filelog(hg_path)?;
514 let fs_len = fs_metadata.len();
514 let fs_len = fs_metadata.len();
515 let filelog_entry =
515 let filelog_entry =
516 filelog.entry_for_node(entry.node_id()?).map_err(|_| {
516 filelog.entry_for_node(entry.node_id()?).map_err(|_| {
517 HgError::corrupted("filelog missing node from manifest")
517 HgError::corrupted("filelog missing node from manifest")
518 })?;
518 })?;
519 // TODO: check `fs_len` here like below, but based on
519 if filelog_entry.file_data_len_not_equal_to(fs_len) {
520 // `RevlogEntry::uncompressed_len` without decompressing the full filelog
520 // No need to read file contents:
521 // contents where possible. This is only valid if the revlog data does not
521 // it cannot be equal if it has a different length.
522 // contain metadata. See how Python’s `revlog.rawsize` calls
522 return Ok(true);
523 // `storageutil.filerevisioncopied`.
523 }
524 // (Maybe also check for content-modifying flags? See `revlog.size`.)
524
525 let filelog_data = filelog_entry.data()?;
525 let p1_filelog_data = filelog_entry.data()?;
526 let contents_in_p1 = filelog_data.file_data()?;
526 let p1_contents = p1_filelog_data.file_data()?;
527 if contents_in_p1.len() as u64 != fs_len {
527 if p1_contents.len() as u64 != fs_len {
528 // No need to read the file contents:
528 // No need to read file contents:
529 // it cannot be equal if it has a different length.
529 // it cannot be equal if it has a different length.
530 return Ok(true);
530 return Ok(true);
531 }
531 }
532
532
533 let fs_contents = if is_symlink {
533 let fs_contents = if is_symlink {
534 get_bytes_from_os_string(vfs.read_link(fs_path)?.into_os_string())
534 get_bytes_from_os_string(vfs.read_link(fs_path)?.into_os_string())
535 } else {
535 } else {
536 vfs.read(fs_path)?
536 vfs.read(fs_path)?
537 };
537 };
538 Ok(contents_in_p1 != &*fs_contents)
538 Ok(p1_contents != &*fs_contents)
539 }
539 }
@@ -1,632 +1,632 b''
1 ===============================================================
1 ===============================================================
2 Test non-regression on the corruption associated with issue6528
2 Test non-regression on the corruption associated with issue6528
3 ===============================================================
3 ===============================================================
4
4
5 Setup
5 Setup
6 =====
6 =====
7
7
8 $ hg init base-repo
8 $ hg init base-repo
9 $ cd base-repo
9 $ cd base-repo
10
10
11 $ cat <<EOF > a.txt
11 $ cat <<EOF > a.txt
12 > 1
12 > 1
13 > 2
13 > 2
14 > 3
14 > 3
15 > 4
15 > 4
16 > 5
16 > 5
17 > 6
17 > 6
18 > EOF
18 > EOF
19
19
20 $ hg add a.txt
20 $ hg add a.txt
21 $ hg commit -m 'c_base_c - create a.txt'
21 $ hg commit -m 'c_base_c - create a.txt'
22
22
23 Modify a.txt
23 Modify a.txt
24
24
25 $ sed -e 's/1/foo/' a.txt > a.tmp; mv a.tmp a.txt
25 $ sed -e 's/1/foo/' a.txt > a.tmp; mv a.tmp a.txt
26 $ hg commit -m 'c_modify_c - modify a.txt'
26 $ hg commit -m 'c_modify_c - modify a.txt'
27
27
28 Modify and rename a.txt to b.txt
28 Modify and rename a.txt to b.txt
29
29
30 $ hg up -r "desc('c_base_c')"
30 $ hg up -r "desc('c_base_c')"
31 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
31 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
32 $ sed -e 's/6/bar/' a.txt > a.tmp; mv a.tmp a.txt
32 $ sed -e 's/6/bar/' a.txt > a.tmp; mv a.tmp a.txt
33 $ hg mv a.txt b.txt
33 $ hg mv a.txt b.txt
34 $ hg commit -m 'c_rename_c - rename and modify a.txt to b.txt'
34 $ hg commit -m 'c_rename_c - rename and modify a.txt to b.txt'
35 created new head
35 created new head
36
36
37 Merge each branch
37 Merge each branch
38
38
39 $ hg merge -r "desc('c_modify_c')"
39 $ hg merge -r "desc('c_modify_c')"
40 merging b.txt and a.txt to b.txt
40 merging b.txt and a.txt to b.txt
41 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
41 0 files updated, 1 files merged, 0 files removed, 0 files unresolved
42 (branch merge, don't forget to commit)
42 (branch merge, don't forget to commit)
43 $ hg commit -m 'c_merge_c: commit merge'
43 $ hg commit -m 'c_merge_c: commit merge'
44
44
45 $ hg debugrevlogindex b.txt
45 $ hg debugrevlogindex b.txt
46 rev linkrev nodeid p1 p2
46 rev linkrev nodeid p1 p2
47 0 2 05b806ebe5ea 000000000000 000000000000
47 0 2 05b806ebe5ea 000000000000 000000000000
48 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
48 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
49
49
50 Check commit Graph
50 Check commit Graph
51
51
52 $ hg log -G
52 $ hg log -G
53 @ changeset: 3:a1cc2bdca0aa
53 @ changeset: 3:a1cc2bdca0aa
54 |\ tag: tip
54 |\ tag: tip
55 | | parent: 2:615c6ccefd15
55 | | parent: 2:615c6ccefd15
56 | | parent: 1:373d507f4667
56 | | parent: 1:373d507f4667
57 | | user: test
57 | | user: test
58 | | date: Thu Jan 01 00:00:00 1970 +0000
58 | | date: Thu Jan 01 00:00:00 1970 +0000
59 | | summary: c_merge_c: commit merge
59 | | summary: c_merge_c: commit merge
60 | |
60 | |
61 | o changeset: 2:615c6ccefd15
61 | o changeset: 2:615c6ccefd15
62 | | parent: 0:f5a5a568022f
62 | | parent: 0:f5a5a568022f
63 | | user: test
63 | | user: test
64 | | date: Thu Jan 01 00:00:00 1970 +0000
64 | | date: Thu Jan 01 00:00:00 1970 +0000
65 | | summary: c_rename_c - rename and modify a.txt to b.txt
65 | | summary: c_rename_c - rename and modify a.txt to b.txt
66 | |
66 | |
67 o | changeset: 1:373d507f4667
67 o | changeset: 1:373d507f4667
68 |/ user: test
68 |/ user: test
69 | date: Thu Jan 01 00:00:00 1970 +0000
69 | date: Thu Jan 01 00:00:00 1970 +0000
70 | summary: c_modify_c - modify a.txt
70 | summary: c_modify_c - modify a.txt
71 |
71 |
72 o changeset: 0:f5a5a568022f
72 o changeset: 0:f5a5a568022f
73 user: test
73 user: test
74 date: Thu Jan 01 00:00:00 1970 +0000
74 date: Thu Jan 01 00:00:00 1970 +0000
75 summary: c_base_c - create a.txt
75 summary: c_base_c - create a.txt
76
76
77
77
78 $ hg cat -r . b.txt
78 $ hg cat -r . b.txt
79 foo
79 foo
80 2
80 2
81 3
81 3
82 4
82 4
83 5
83 5
84 bar
84 bar
85 $ cat b.txt
85 $ cat b.txt
86 foo
86 foo
87 2
87 2
88 3
88 3
89 4
89 4
90 5
90 5
91 bar
91 bar
92 $ cd ..
92 $ cd ..
93
93
94
94
95 Check the lack of corruption
95 Check the lack of corruption
96 ============================
96 ============================
97
97
98 $ hg clone --pull base-repo cloned
98 $ hg clone --pull base-repo cloned
99 requesting all changes
99 requesting all changes
100 adding changesets
100 adding changesets
101 adding manifests
101 adding manifests
102 adding file changes
102 adding file changes
103 added 4 changesets with 4 changes to 2 files
103 added 4 changesets with 4 changes to 2 files
104 new changesets f5a5a568022f:a1cc2bdca0aa
104 new changesets f5a5a568022f:a1cc2bdca0aa
105 updating to branch default
105 updating to branch default
106 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
106 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
107 $ cd cloned
107 $ cd cloned
108 $ hg up -r "desc('c_merge_c')"
108 $ hg up -r "desc('c_merge_c')"
109 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
109 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
110
110
111
111
112 Status is buggy, even with debugrebuilddirstate
112 Status is buggy, even with debugrebuilddirstate
113
113
114 $ hg cat -r . b.txt
114 $ hg cat -r . b.txt
115 foo
115 foo
116 2
116 2
117 3
117 3
118 4
118 4
119 5
119 5
120 bar
120 bar
121 $ cat b.txt
121 $ cat b.txt
122 foo
122 foo
123 2
123 2
124 3
124 3
125 4
125 4
126 5
126 5
127 bar
127 bar
128 $ hg status
128 $ hg status
129 $ hg debugrebuilddirstate
129 $ hg debugrebuilddirstate
130 $ hg status
130 $ hg status
131
131
132 the history was altered
132 the history was altered
133
133
134 in theory p1/p2 order does not matter but in practice p1 == nullid is used as a
134 in theory p1/p2 order does not matter but in practice p1 == nullid is used as a
135 marker that some metadata are present and should be fetched.
135 marker that some metadata are present and should be fetched.
136
136
137 $ hg debugrevlogindex b.txt
137 $ hg debugrevlogindex b.txt
138 rev linkrev nodeid p1 p2
138 rev linkrev nodeid p1 p2
139 0 2 05b806ebe5ea 000000000000 000000000000
139 0 2 05b806ebe5ea 000000000000 000000000000
140 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
140 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
141
141
142 Check commit Graph
142 Check commit Graph
143
143
144 $ hg log -G
144 $ hg log -G
145 @ changeset: 3:a1cc2bdca0aa
145 @ changeset: 3:a1cc2bdca0aa
146 |\ tag: tip
146 |\ tag: tip
147 | | parent: 2:615c6ccefd15
147 | | parent: 2:615c6ccefd15
148 | | parent: 1:373d507f4667
148 | | parent: 1:373d507f4667
149 | | user: test
149 | | user: test
150 | | date: Thu Jan 01 00:00:00 1970 +0000
150 | | date: Thu Jan 01 00:00:00 1970 +0000
151 | | summary: c_merge_c: commit merge
151 | | summary: c_merge_c: commit merge
152 | |
152 | |
153 | o changeset: 2:615c6ccefd15
153 | o changeset: 2:615c6ccefd15
154 | | parent: 0:f5a5a568022f
154 | | parent: 0:f5a5a568022f
155 | | user: test
155 | | user: test
156 | | date: Thu Jan 01 00:00:00 1970 +0000
156 | | date: Thu Jan 01 00:00:00 1970 +0000
157 | | summary: c_rename_c - rename and modify a.txt to b.txt
157 | | summary: c_rename_c - rename and modify a.txt to b.txt
158 | |
158 | |
159 o | changeset: 1:373d507f4667
159 o | changeset: 1:373d507f4667
160 |/ user: test
160 |/ user: test
161 | date: Thu Jan 01 00:00:00 1970 +0000
161 | date: Thu Jan 01 00:00:00 1970 +0000
162 | summary: c_modify_c - modify a.txt
162 | summary: c_modify_c - modify a.txt
163 |
163 |
164 o changeset: 0:f5a5a568022f
164 o changeset: 0:f5a5a568022f
165 user: test
165 user: test
166 date: Thu Jan 01 00:00:00 1970 +0000
166 date: Thu Jan 01 00:00:00 1970 +0000
167 summary: c_base_c - create a.txt
167 summary: c_base_c - create a.txt
168
168
169
169
170 Test the command that fixes the issue
170 Test the command that fixes the issue
171 =====================================
171 =====================================
172
172
173 Restore a broken repository with multiple broken revisions and a filename that
173 Restore a broken repository with multiple broken revisions and a filename that
174 would get encoded to test the `report` options.
174 would get encoded to test the `report` options.
175 It's a tarball because unbundle might magically fix the issue later.
175 It's a tarball because unbundle might magically fix the issue later.
176
176
177 $ cd ..
177 $ cd ..
178 $ mkdir repo-to-fix
178 $ mkdir repo-to-fix
179 $ cd repo-to-fix
179 $ cd repo-to-fix
180 #if windows
180 #if windows
181 tar interprets `:` in paths (like `C:`) as being remote, force local on Windows
181 tar interprets `:` in paths (like `C:`) as being remote, force local on Windows
182 only since some versions of tar don't have this flag.
182 only since some versions of tar don't have this flag.
183
183
184 $ tar --force-local -xf $TESTDIR/bundles/issue6528.tar
184 $ tar --force-local -xf $TESTDIR/bundles/issue6528.tar
185 #else
185 #else
186 $ tar xf $TESTDIR/bundles/issue6528.tar
186 $ tar xf $TESTDIR/bundles/issue6528.tar
187 #endif
187 #endif
188
188
189 Check that the issue is present
189 Check that the issue is present
190 (It is currently not present with rhg but will be when optimizations are added
190 (It is currently not present with rhg but will be when optimizations are added
191 to resolve ambiguous files at the end of status without reading their content
191 to resolve ambiguous files at the end of status without reading their content
192 if the size differs, and reading the expected size without resolving filelog
192 if the size differs, and reading the expected size without resolving filelog
193 deltas where possible.)
193 deltas where possible.)
194
194
195 $ hg st
195 $ hg st
196 M D.txt (no-rhg !)
196 M D.txt
197 M b.txt (no-rhg !)
197 M b.txt
198 $ hg debugrevlogindex b.txt
198 $ hg debugrevlogindex b.txt
199 rev linkrev nodeid p1 p2
199 rev linkrev nodeid p1 p2
200 0 2 05b806ebe5ea 000000000000 000000000000
200 0 2 05b806ebe5ea 000000000000 000000000000
201 1 3 a58b36ad6b65 05b806ebe5ea 000000000000
201 1 3 a58b36ad6b65 05b806ebe5ea 000000000000
202 2 6 216a5fe8b8ed 000000000000 000000000000
202 2 6 216a5fe8b8ed 000000000000 000000000000
203 3 7 ea4f2f2463cc 216a5fe8b8ed 000000000000
203 3 7 ea4f2f2463cc 216a5fe8b8ed 000000000000
204 $ hg debugrevlogindex D.txt
204 $ hg debugrevlogindex D.txt
205 rev linkrev nodeid p1 p2
205 rev linkrev nodeid p1 p2
206 0 6 2a8d3833f2fb 000000000000 000000000000
206 0 6 2a8d3833f2fb 000000000000 000000000000
207 1 7 2a80419dfc31 2a8d3833f2fb 000000000000
207 1 7 2a80419dfc31 2a8d3833f2fb 000000000000
208
208
209 Dry-run the fix
209 Dry-run the fix
210 $ hg debug-repair-issue6528 --dry-run
210 $ hg debug-repair-issue6528 --dry-run
211 found affected revision 1 for filelog 'data/D.txt.i'
211 found affected revision 1 for filelog 'data/D.txt.i'
212 found affected revision 1 for filelog 'data/b.txt.i'
212 found affected revision 1 for filelog 'data/b.txt.i'
213 found affected revision 3 for filelog 'data/b.txt.i'
213 found affected revision 3 for filelog 'data/b.txt.i'
214 $ hg st
214 $ hg st
215 M D.txt (no-rhg !)
215 M D.txt
216 M b.txt (no-rhg !)
216 M b.txt
217 $ hg debugrevlogindex b.txt
217 $ hg debugrevlogindex b.txt
218 rev linkrev nodeid p1 p2
218 rev linkrev nodeid p1 p2
219 0 2 05b806ebe5ea 000000000000 000000000000
219 0 2 05b806ebe5ea 000000000000 000000000000
220 1 3 a58b36ad6b65 05b806ebe5ea 000000000000
220 1 3 a58b36ad6b65 05b806ebe5ea 000000000000
221 2 6 216a5fe8b8ed 000000000000 000000000000
221 2 6 216a5fe8b8ed 000000000000 000000000000
222 3 7 ea4f2f2463cc 216a5fe8b8ed 000000000000
222 3 7 ea4f2f2463cc 216a5fe8b8ed 000000000000
223 $ hg debugrevlogindex D.txt
223 $ hg debugrevlogindex D.txt
224 rev linkrev nodeid p1 p2
224 rev linkrev nodeid p1 p2
225 0 6 2a8d3833f2fb 000000000000 000000000000
225 0 6 2a8d3833f2fb 000000000000 000000000000
226 1 7 2a80419dfc31 2a8d3833f2fb 000000000000
226 1 7 2a80419dfc31 2a8d3833f2fb 000000000000
227
227
228 Test the --paranoid option
228 Test the --paranoid option
229 $ hg debug-repair-issue6528 --dry-run --paranoid
229 $ hg debug-repair-issue6528 --dry-run --paranoid
230 found affected revision 1 for filelog 'data/D.txt.i'
230 found affected revision 1 for filelog 'data/D.txt.i'
231 found affected revision 1 for filelog 'data/b.txt.i'
231 found affected revision 1 for filelog 'data/b.txt.i'
232 found affected revision 3 for filelog 'data/b.txt.i'
232 found affected revision 3 for filelog 'data/b.txt.i'
233 $ hg st
233 $ hg st
234 M D.txt (no-rhg !)
234 M D.txt
235 M b.txt (no-rhg !)
235 M b.txt
236 $ hg debugrevlogindex b.txt
236 $ hg debugrevlogindex b.txt
237 rev linkrev nodeid p1 p2
237 rev linkrev nodeid p1 p2
238 0 2 05b806ebe5ea 000000000000 000000000000
238 0 2 05b806ebe5ea 000000000000 000000000000
239 1 3 a58b36ad6b65 05b806ebe5ea 000000000000
239 1 3 a58b36ad6b65 05b806ebe5ea 000000000000
240 2 6 216a5fe8b8ed 000000000000 000000000000
240 2 6 216a5fe8b8ed 000000000000 000000000000
241 3 7 ea4f2f2463cc 216a5fe8b8ed 000000000000
241 3 7 ea4f2f2463cc 216a5fe8b8ed 000000000000
242 $ hg debugrevlogindex D.txt
242 $ hg debugrevlogindex D.txt
243 rev linkrev nodeid p1 p2
243 rev linkrev nodeid p1 p2
244 0 6 2a8d3833f2fb 000000000000 000000000000
244 0 6 2a8d3833f2fb 000000000000 000000000000
245 1 7 2a80419dfc31 2a8d3833f2fb 000000000000
245 1 7 2a80419dfc31 2a8d3833f2fb 000000000000
246
246
247 Run the fix
247 Run the fix
248 $ hg debug-repair-issue6528
248 $ hg debug-repair-issue6528
249 found affected revision 1 for filelog 'data/D.txt.i'
249 found affected revision 1 for filelog 'data/D.txt.i'
250 repaired revision 1 of 'filelog data/D.txt.i'
250 repaired revision 1 of 'filelog data/D.txt.i'
251 found affected revision 1 for filelog 'data/b.txt.i'
251 found affected revision 1 for filelog 'data/b.txt.i'
252 found affected revision 3 for filelog 'data/b.txt.i'
252 found affected revision 3 for filelog 'data/b.txt.i'
253 repaired revision 1 of 'filelog data/b.txt.i'
253 repaired revision 1 of 'filelog data/b.txt.i'
254 repaired revision 3 of 'filelog data/b.txt.i'
254 repaired revision 3 of 'filelog data/b.txt.i'
255
255
256 Check that the fix worked and that running it twice does nothing
256 Check that the fix worked and that running it twice does nothing
257 $ hg st
257 $ hg st
258 $ hg debugrevlogindex b.txt
258 $ hg debugrevlogindex b.txt
259 rev linkrev nodeid p1 p2
259 rev linkrev nodeid p1 p2
260 0 2 05b806ebe5ea 000000000000 000000000000
260 0 2 05b806ebe5ea 000000000000 000000000000
261 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
261 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
262 2 6 216a5fe8b8ed 000000000000 000000000000
262 2 6 216a5fe8b8ed 000000000000 000000000000
263 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
263 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
264 $ hg debugrevlogindex D.txt
264 $ hg debugrevlogindex D.txt
265 rev linkrev nodeid p1 p2
265 rev linkrev nodeid p1 p2
266 0 6 2a8d3833f2fb 000000000000 000000000000
266 0 6 2a8d3833f2fb 000000000000 000000000000
267 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
267 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
268 $ hg debug-repair-issue6528
268 $ hg debug-repair-issue6528
269 no affected revisions were found
269 no affected revisions were found
270 $ hg st
270 $ hg st
271 $ hg debugrevlogindex b.txt
271 $ hg debugrevlogindex b.txt
272 rev linkrev nodeid p1 p2
272 rev linkrev nodeid p1 p2
273 0 2 05b806ebe5ea 000000000000 000000000000
273 0 2 05b806ebe5ea 000000000000 000000000000
274 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
274 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
275 2 6 216a5fe8b8ed 000000000000 000000000000
275 2 6 216a5fe8b8ed 000000000000 000000000000
276 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
276 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
277 $ hg debugrevlogindex D.txt
277 $ hg debugrevlogindex D.txt
278 rev linkrev nodeid p1 p2
278 rev linkrev nodeid p1 p2
279 0 6 2a8d3833f2fb 000000000000 000000000000
279 0 6 2a8d3833f2fb 000000000000 000000000000
280 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
280 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
281
281
282 Try using the report options
282 Try using the report options
283 --------------------------------
283 --------------------------------
284
284
285 $ cd ..
285 $ cd ..
286 $ mkdir repo-to-fix-report
286 $ mkdir repo-to-fix-report
287 $ cd repo-to-fix
287 $ cd repo-to-fix
288 #if windows
288 #if windows
289 tar interprets `:` in paths (like `C:`) as being remote, force local on Windows
289 tar interprets `:` in paths (like `C:`) as being remote, force local on Windows
290 only since some versions of tar don't have this flag.
290 only since some versions of tar don't have this flag.
291
291
292 $ tar --force-local -xf $TESTDIR/bundles/issue6528.tar
292 $ tar --force-local -xf $TESTDIR/bundles/issue6528.tar
293 #else
293 #else
294 $ tar xf $TESTDIR/bundles/issue6528.tar
294 $ tar xf $TESTDIR/bundles/issue6528.tar
295 #endif
295 #endif
296
296
297 $ hg debug-repair-issue6528 --to-report $TESTTMP/report.txt
297 $ hg debug-repair-issue6528 --to-report $TESTTMP/report.txt
298 found affected revision 1 for filelog 'data/D.txt.i'
298 found affected revision 1 for filelog 'data/D.txt.i'
299 found affected revision 1 for filelog 'data/b.txt.i'
299 found affected revision 1 for filelog 'data/b.txt.i'
300 found affected revision 3 for filelog 'data/b.txt.i'
300 found affected revision 3 for filelog 'data/b.txt.i'
301 $ cat $TESTTMP/report.txt
301 $ cat $TESTTMP/report.txt
302 2a80419dfc31d7dfb308ac40f3f138282de7d73b D.txt
302 2a80419dfc31d7dfb308ac40f3f138282de7d73b D.txt
303 a58b36ad6b6545195952793099613c2116f3563b,ea4f2f2463cca5b29ddf3461012b8ce5c6dac175 b.txt
303 a58b36ad6b6545195952793099613c2116f3563b,ea4f2f2463cca5b29ddf3461012b8ce5c6dac175 b.txt
304
304
305 $ hg debug-repair-issue6528 --from-report $TESTTMP/report.txt --dry-run
305 $ hg debug-repair-issue6528 --from-report $TESTTMP/report.txt --dry-run
306 loading report file '$TESTTMP/report.txt'
306 loading report file '$TESTTMP/report.txt'
307 found affected revision 1 for filelog 'D.txt'
307 found affected revision 1 for filelog 'D.txt'
308 found affected revision 1 for filelog 'b.txt'
308 found affected revision 1 for filelog 'b.txt'
309 found affected revision 3 for filelog 'b.txt'
309 found affected revision 3 for filelog 'b.txt'
310 $ hg st
310 $ hg st
311 M D.txt (no-rhg !)
311 M D.txt
312 M b.txt (no-rhg !)
312 M b.txt
313 $ hg debugrevlogindex b.txt
313 $ hg debugrevlogindex b.txt
314 rev linkrev nodeid p1 p2
314 rev linkrev nodeid p1 p2
315 0 2 05b806ebe5ea 000000000000 000000000000
315 0 2 05b806ebe5ea 000000000000 000000000000
316 1 3 a58b36ad6b65 05b806ebe5ea 000000000000
316 1 3 a58b36ad6b65 05b806ebe5ea 000000000000
317 2 6 216a5fe8b8ed 000000000000 000000000000
317 2 6 216a5fe8b8ed 000000000000 000000000000
318 3 7 ea4f2f2463cc 216a5fe8b8ed 000000000000
318 3 7 ea4f2f2463cc 216a5fe8b8ed 000000000000
319 $ hg debugrevlogindex D.txt
319 $ hg debugrevlogindex D.txt
320 rev linkrev nodeid p1 p2
320 rev linkrev nodeid p1 p2
321 0 6 2a8d3833f2fb 000000000000 000000000000
321 0 6 2a8d3833f2fb 000000000000 000000000000
322 1 7 2a80419dfc31 2a8d3833f2fb 000000000000
322 1 7 2a80419dfc31 2a8d3833f2fb 000000000000
323
323
324 $ hg debug-repair-issue6528 --from-report $TESTTMP/report.txt
324 $ hg debug-repair-issue6528 --from-report $TESTTMP/report.txt
325 loading report file '$TESTTMP/report.txt'
325 loading report file '$TESTTMP/report.txt'
326 found affected revision 1 for filelog 'D.txt'
326 found affected revision 1 for filelog 'D.txt'
327 repaired revision 1 of 'filelog data/D.txt.i'
327 repaired revision 1 of 'filelog data/D.txt.i'
328 found affected revision 1 for filelog 'b.txt'
328 found affected revision 1 for filelog 'b.txt'
329 found affected revision 3 for filelog 'b.txt'
329 found affected revision 3 for filelog 'b.txt'
330 repaired revision 1 of 'filelog data/b.txt.i'
330 repaired revision 1 of 'filelog data/b.txt.i'
331 repaired revision 3 of 'filelog data/b.txt.i'
331 repaired revision 3 of 'filelog data/b.txt.i'
332 $ hg st
332 $ hg st
333 $ hg debugrevlogindex b.txt
333 $ hg debugrevlogindex b.txt
334 rev linkrev nodeid p1 p2
334 rev linkrev nodeid p1 p2
335 0 2 05b806ebe5ea 000000000000 000000000000
335 0 2 05b806ebe5ea 000000000000 000000000000
336 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
336 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
337 2 6 216a5fe8b8ed 000000000000 000000000000
337 2 6 216a5fe8b8ed 000000000000 000000000000
338 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
338 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
339 $ hg debugrevlogindex D.txt
339 $ hg debugrevlogindex D.txt
340 rev linkrev nodeid p1 p2
340 rev linkrev nodeid p1 p2
341 0 6 2a8d3833f2fb 000000000000 000000000000
341 0 6 2a8d3833f2fb 000000000000 000000000000
342 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
342 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
343
343
344 Check that the revision is not "fixed" again
344 Check that the revision is not "fixed" again
345
345
346 $ hg debug-repair-issue6528 --from-report $TESTTMP/report.txt
346 $ hg debug-repair-issue6528 --from-report $TESTTMP/report.txt
347 loading report file '$TESTTMP/report.txt'
347 loading report file '$TESTTMP/report.txt'
348 revision 2a80419dfc31d7dfb308ac40f3f138282de7d73b of file 'D.txt' is not affected
348 revision 2a80419dfc31d7dfb308ac40f3f138282de7d73b of file 'D.txt' is not affected
349 no affected revisions were found for 'D.txt'
349 no affected revisions were found for 'D.txt'
350 revision a58b36ad6b6545195952793099613c2116f3563b of file 'b.txt' is not affected
350 revision a58b36ad6b6545195952793099613c2116f3563b of file 'b.txt' is not affected
351 revision ea4f2f2463cca5b29ddf3461012b8ce5c6dac175 of file 'b.txt' is not affected
351 revision ea4f2f2463cca5b29ddf3461012b8ce5c6dac175 of file 'b.txt' is not affected
352 no affected revisions were found for 'b.txt'
352 no affected revisions were found for 'b.txt'
353 $ hg st
353 $ hg st
354 $ hg debugrevlogindex b.txt
354 $ hg debugrevlogindex b.txt
355 rev linkrev nodeid p1 p2
355 rev linkrev nodeid p1 p2
356 0 2 05b806ebe5ea 000000000000 000000000000
356 0 2 05b806ebe5ea 000000000000 000000000000
357 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
357 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
358 2 6 216a5fe8b8ed 000000000000 000000000000
358 2 6 216a5fe8b8ed 000000000000 000000000000
359 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
359 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
360 $ hg debugrevlogindex D.txt
360 $ hg debugrevlogindex D.txt
361 rev linkrev nodeid p1 p2
361 rev linkrev nodeid p1 p2
362 0 6 2a8d3833f2fb 000000000000 000000000000
362 0 6 2a8d3833f2fb 000000000000 000000000000
363 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
363 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
364
364
365 Try it with a non-inline revlog
365 Try it with a non-inline revlog
366 -------------------------------
366 -------------------------------
367
367
368 $ cd ..
368 $ cd ..
369 $ mkdir $TESTTMP/ext
369 $ mkdir $TESTTMP/ext
370 $ cat << EOF > $TESTTMP/ext/small_inline.py
370 $ cat << EOF > $TESTTMP/ext/small_inline.py
371 > from mercurial import revlog
371 > from mercurial import revlog
372 > revlog._maxinline = 8
372 > revlog._maxinline = 8
373 > EOF
373 > EOF
374
374
375 $ cat << EOF >> $HGRCPATH
375 $ cat << EOF >> $HGRCPATH
376 > [extensions]
376 > [extensions]
377 > small_inline=$TESTTMP/ext/small_inline.py
377 > small_inline=$TESTTMP/ext/small_inline.py
378 > EOF
378 > EOF
379
379
380 $ mkdir repo-to-fix-not-inline
380 $ mkdir repo-to-fix-not-inline
381 $ cd repo-to-fix-not-inline
381 $ cd repo-to-fix-not-inline
382 #if windows
382 #if windows
383 tar interprets `:` in paths (like `C:`) as being remote, force local on Windows
383 tar interprets `:` in paths (like `C:`) as being remote, force local on Windows
384 only since some versions of tar don't have this flag.
384 only since some versions of tar don't have this flag.
385
385
386 $ tar --force-local -xf $TESTDIR/bundles/issue6528.tar
386 $ tar --force-local -xf $TESTDIR/bundles/issue6528.tar
387 #else
387 #else
388 $ tar xf $TESTDIR/bundles/issue6528.tar
388 $ tar xf $TESTDIR/bundles/issue6528.tar
389 #endif
389 #endif
390 $ echo b >> b.txt
390 $ echo b >> b.txt
391 $ hg commit -qm "inline -> separate"
391 $ hg commit -qm "inline -> separate"
392 $ find .hg -name *b.txt.d
392 $ find .hg -name *b.txt.d
393 .hg/store/data/b.txt.d
393 .hg/store/data/b.txt.d
394
394
395 Status is correct, but the problem is still there, in the earlier revision
395 Status is correct, but the problem is still there, in the earlier revision
396 $ hg st
396 $ hg st
397 $ hg up 3
397 $ hg up 3
398 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
398 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
399 $ hg st
399 $ hg st
400 M b.txt
400 M b.txt
401 $ hg debugrevlogindex b.txt
401 $ hg debugrevlogindex b.txt
402 rev linkrev nodeid p1 p2
402 rev linkrev nodeid p1 p2
403 0 2 05b806ebe5ea 000000000000 000000000000
403 0 2 05b806ebe5ea 000000000000 000000000000
404 1 3 a58b36ad6b65 05b806ebe5ea 000000000000
404 1 3 a58b36ad6b65 05b806ebe5ea 000000000000
405 2 6 216a5fe8b8ed 000000000000 000000000000
405 2 6 216a5fe8b8ed 000000000000 000000000000
406 3 7 ea4f2f2463cc 216a5fe8b8ed 000000000000
406 3 7 ea4f2f2463cc 216a5fe8b8ed 000000000000
407 4 8 db234885e2fe ea4f2f2463cc 000000000000
407 4 8 db234885e2fe ea4f2f2463cc 000000000000
408 $ hg debugrevlogindex D.txt
408 $ hg debugrevlogindex D.txt
409 rev linkrev nodeid p1 p2
409 rev linkrev nodeid p1 p2
410 0 6 2a8d3833f2fb 000000000000 000000000000
410 0 6 2a8d3833f2fb 000000000000 000000000000
411 1 7 2a80419dfc31 2a8d3833f2fb 000000000000
411 1 7 2a80419dfc31 2a8d3833f2fb 000000000000
412 2 8 65aecc89bb5d 2a80419dfc31 000000000000
412 2 8 65aecc89bb5d 2a80419dfc31 000000000000
413
413
414 Run the fix on the non-inline revlog
414 Run the fix on the non-inline revlog
415 $ hg debug-repair-issue6528
415 $ hg debug-repair-issue6528
416 found affected revision 1 for filelog 'data/D.txt.i'
416 found affected revision 1 for filelog 'data/D.txt.i'
417 repaired revision 1 of 'filelog data/D.txt.i'
417 repaired revision 1 of 'filelog data/D.txt.i'
418 found affected revision 1 for filelog 'data/b.txt.i'
418 found affected revision 1 for filelog 'data/b.txt.i'
419 found affected revision 3 for filelog 'data/b.txt.i'
419 found affected revision 3 for filelog 'data/b.txt.i'
420 repaired revision 1 of 'filelog data/b.txt.i'
420 repaired revision 1 of 'filelog data/b.txt.i'
421 repaired revision 3 of 'filelog data/b.txt.i'
421 repaired revision 3 of 'filelog data/b.txt.i'
422
422
423 Check that it worked
423 Check that it worked
424 $ hg debugrevlogindex b.txt
424 $ hg debugrevlogindex b.txt
425 rev linkrev nodeid p1 p2
425 rev linkrev nodeid p1 p2
426 0 2 05b806ebe5ea 000000000000 000000000000
426 0 2 05b806ebe5ea 000000000000 000000000000
427 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
427 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
428 2 6 216a5fe8b8ed 000000000000 000000000000
428 2 6 216a5fe8b8ed 000000000000 000000000000
429 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
429 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
430 4 8 db234885e2fe ea4f2f2463cc 000000000000
430 4 8 db234885e2fe ea4f2f2463cc 000000000000
431 $ hg debugrevlogindex D.txt
431 $ hg debugrevlogindex D.txt
432 rev linkrev nodeid p1 p2
432 rev linkrev nodeid p1 p2
433 0 6 2a8d3833f2fb 000000000000 000000000000
433 0 6 2a8d3833f2fb 000000000000 000000000000
434 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
434 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
435 2 8 65aecc89bb5d 2a80419dfc31 000000000000
435 2 8 65aecc89bb5d 2a80419dfc31 000000000000
436 $ hg debug-repair-issue6528
436 $ hg debug-repair-issue6528
437 no affected revisions were found
437 no affected revisions were found
438 $ hg st
438 $ hg st
439
439
440 $ cd ..
440 $ cd ..
441
441
442 Applying a bad bundle should fix it on the fly
442 Applying a bad bundle should fix it on the fly
443 ----------------------------------------------
443 ----------------------------------------------
444
444
445 from a v1 bundle
445 from a v1 bundle
446 ~~~~~~~~~~~~~~~~
446 ~~~~~~~~~~~~~~~~
447
447
448 $ hg debugbundle --spec "$TESTDIR"/bundles/issue6528.hg-v1
448 $ hg debugbundle --spec "$TESTDIR"/bundles/issue6528.hg-v1
449 bzip2-v1
449 bzip2-v1
450
450
451 $ hg init unbundle-v1
451 $ hg init unbundle-v1
452 $ cd unbundle-v1
452 $ cd unbundle-v1
453
453
454 $ hg unbundle "$TESTDIR"/bundles/issue6528.hg-v1
454 $ hg unbundle "$TESTDIR"/bundles/issue6528.hg-v1
455 adding changesets
455 adding changesets
456 adding manifests
456 adding manifests
457 adding file changes
457 adding file changes
458 added 8 changesets with 12 changes to 4 files
458 added 8 changesets with 12 changes to 4 files
459 new changesets f5a5a568022f:3beabb508514 (8 drafts)
459 new changesets f5a5a568022f:3beabb508514 (8 drafts)
460 (run 'hg update' to get a working copy)
460 (run 'hg update' to get a working copy)
461
461
462 Check that revision were fixed on the fly
462 Check that revision were fixed on the fly
463
463
464 $ hg debugrevlogindex b.txt
464 $ hg debugrevlogindex b.txt
465 rev linkrev nodeid p1 p2
465 rev linkrev nodeid p1 p2
466 0 2 05b806ebe5ea 000000000000 000000000000
466 0 2 05b806ebe5ea 000000000000 000000000000
467 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
467 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
468 2 6 216a5fe8b8ed 000000000000 000000000000
468 2 6 216a5fe8b8ed 000000000000 000000000000
469 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
469 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
470
470
471 $ hg debugrevlogindex D.txt
471 $ hg debugrevlogindex D.txt
472 rev linkrev nodeid p1 p2
472 rev linkrev nodeid p1 p2
473 0 6 2a8d3833f2fb 000000000000 000000000000
473 0 6 2a8d3833f2fb 000000000000 000000000000
474 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
474 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
475
475
476 That we don't see the symptoms of the bug
476 That we don't see the symptoms of the bug
477
477
478 $ hg up -- -1
478 $ hg up -- -1
479 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
479 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
480 $ hg status
480 $ hg status
481
481
482 And that the repair command does not find anything to fix
482 And that the repair command does not find anything to fix
483
483
484 $ hg debug-repair-issue6528
484 $ hg debug-repair-issue6528
485 no affected revisions were found
485 no affected revisions were found
486
486
487 $ cd ..
487 $ cd ..
488
488
489 from a v2 bundle
489 from a v2 bundle
490 ~~~~~~~~~~~~~~~~
490 ~~~~~~~~~~~~~~~~
491
491
492 $ hg debugbundle --spec "$TESTDIR"/bundles/issue6528.hg-v2
492 $ hg debugbundle --spec "$TESTDIR"/bundles/issue6528.hg-v2
493 bzip2-v2
493 bzip2-v2
494
494
495 $ hg init unbundle-v2
495 $ hg init unbundle-v2
496 $ cd unbundle-v2
496 $ cd unbundle-v2
497
497
498 $ hg unbundle "$TESTDIR"/bundles/issue6528.hg-v2
498 $ hg unbundle "$TESTDIR"/bundles/issue6528.hg-v2
499 adding changesets
499 adding changesets
500 adding manifests
500 adding manifests
501 adding file changes
501 adding file changes
502 added 8 changesets with 12 changes to 4 files
502 added 8 changesets with 12 changes to 4 files
503 new changesets f5a5a568022f:3beabb508514 (8 drafts)
503 new changesets f5a5a568022f:3beabb508514 (8 drafts)
504 (run 'hg update' to get a working copy)
504 (run 'hg update' to get a working copy)
505
505
506 Check that revision were fixed on the fly
506 Check that revision were fixed on the fly
507
507
508 $ hg debugrevlogindex b.txt
508 $ hg debugrevlogindex b.txt
509 rev linkrev nodeid p1 p2
509 rev linkrev nodeid p1 p2
510 0 2 05b806ebe5ea 000000000000 000000000000
510 0 2 05b806ebe5ea 000000000000 000000000000
511 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
511 1 3 a58b36ad6b65 000000000000 05b806ebe5ea
512 2 6 216a5fe8b8ed 000000000000 000000000000
512 2 6 216a5fe8b8ed 000000000000 000000000000
513 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
513 3 7 ea4f2f2463cc 000000000000 216a5fe8b8ed
514
514
515 $ hg debugrevlogindex D.txt
515 $ hg debugrevlogindex D.txt
516 rev linkrev nodeid p1 p2
516 rev linkrev nodeid p1 p2
517 0 6 2a8d3833f2fb 000000000000 000000000000
517 0 6 2a8d3833f2fb 000000000000 000000000000
518 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
518 1 7 2a80419dfc31 000000000000 2a8d3833f2fb
519
519
520 That we don't see the symptoms of the bug
520 That we don't see the symptoms of the bug
521
521
522 $ hg up -- -1
522 $ hg up -- -1
523 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
523 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
524 $ hg status
524 $ hg status
525
525
526 And that the repair command does not find anything to fix
526 And that the repair command does not find anything to fix
527
527
528 $ hg debug-repair-issue6528
528 $ hg debug-repair-issue6528
529 no affected revisions were found
529 no affected revisions were found
530
530
531 $ cd ..
531 $ cd ..
532
532
533 A config option can disable the fixing of the bad bundle on the fly
533 A config option can disable the fixing of the bad bundle on the fly
534 -------------------------------------------------------------------
534 -------------------------------------------------------------------
535
535
536
536
537
537
538 from a v1 bundle
538 from a v1 bundle
539 ~~~~~~~~~~~~~~~~
539 ~~~~~~~~~~~~~~~~
540
540
541 $ hg debugbundle --spec "$TESTDIR"/bundles/issue6528.hg-v1
541 $ hg debugbundle --spec "$TESTDIR"/bundles/issue6528.hg-v1
542 bzip2-v1
542 bzip2-v1
543
543
544 $ hg init unbundle-v1-no-fix
544 $ hg init unbundle-v1-no-fix
545 $ cd unbundle-v1-no-fix
545 $ cd unbundle-v1-no-fix
546
546
547 $ hg unbundle "$TESTDIR"/bundles/issue6528.hg-v1 --config storage.revlog.issue6528.fix-incoming=no
547 $ hg unbundle "$TESTDIR"/bundles/issue6528.hg-v1 --config storage.revlog.issue6528.fix-incoming=no
548 adding changesets
548 adding changesets
549 adding manifests
549 adding manifests
550 adding file changes
550 adding file changes
551 added 8 changesets with 12 changes to 4 files
551 added 8 changesets with 12 changes to 4 files
552 new changesets f5a5a568022f:3beabb508514 (8 drafts)
552 new changesets f5a5a568022f:3beabb508514 (8 drafts)
553 (run 'hg update' to get a working copy)
553 (run 'hg update' to get a working copy)
554
554
555 Check that revision were not fixed on the fly
555 Check that revision were not fixed on the fly
556
556
557 $ hg debugrevlogindex b.txt
557 $ hg debugrevlogindex b.txt
558 rev linkrev nodeid p1 p2
558 rev linkrev nodeid p1 p2
559 0 2 05b806ebe5ea 000000000000 000000000000
559 0 2 05b806ebe5ea 000000000000 000000000000
560 1 3 a58b36ad6b65 05b806ebe5ea 000000000000
560 1 3 a58b36ad6b65 05b806ebe5ea 000000000000
561 2 6 216a5fe8b8ed 000000000000 000000000000
561 2 6 216a5fe8b8ed 000000000000 000000000000
562 3 7 ea4f2f2463cc 216a5fe8b8ed 000000000000
562 3 7 ea4f2f2463cc 216a5fe8b8ed 000000000000
563
563
564 $ hg debugrevlogindex D.txt
564 $ hg debugrevlogindex D.txt
565 rev linkrev nodeid p1 p2
565 rev linkrev nodeid p1 p2
566 0 6 2a8d3833f2fb 000000000000 000000000000
566 0 6 2a8d3833f2fb 000000000000 000000000000
567 1 7 2a80419dfc31 2a8d3833f2fb 000000000000
567 1 7 2a80419dfc31 2a8d3833f2fb 000000000000
568
568
569 That we do see the symptoms of the bug
569 That we do see the symptoms of the bug
570
570
571 $ hg up -- -1
571 $ hg up -- -1
572 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
572 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
573 $ hg status
573 $ hg status
574 M D.txt (?)
574 M D.txt (?)
575 M b.txt (?)
575 M b.txt (?)
576
576
577 And that the repair command find issue to fix.
577 And that the repair command find issue to fix.
578
578
579 $ hg debug-repair-issue6528 --dry-run
579 $ hg debug-repair-issue6528 --dry-run
580 found affected revision 1 for filelog 'data/D.txt.i'
580 found affected revision 1 for filelog 'data/D.txt.i'
581 found affected revision 1 for filelog 'data/b.txt.i'
581 found affected revision 1 for filelog 'data/b.txt.i'
582 found affected revision 3 for filelog 'data/b.txt.i'
582 found affected revision 3 for filelog 'data/b.txt.i'
583
583
584 $ cd ..
584 $ cd ..
585
585
586 from a v2 bundle
586 from a v2 bundle
587 ~~~~~~~~~~~~~~~~
587 ~~~~~~~~~~~~~~~~
588
588
589 $ hg debugbundle --spec "$TESTDIR"/bundles/issue6528.hg-v2
589 $ hg debugbundle --spec "$TESTDIR"/bundles/issue6528.hg-v2
590 bzip2-v2
590 bzip2-v2
591
591
592 $ hg init unbundle-v2-no-fix
592 $ hg init unbundle-v2-no-fix
593 $ cd unbundle-v2-no-fix
593 $ cd unbundle-v2-no-fix
594
594
595 $ hg unbundle "$TESTDIR"/bundles/issue6528.hg-v2 --config storage.revlog.issue6528.fix-incoming=no
595 $ hg unbundle "$TESTDIR"/bundles/issue6528.hg-v2 --config storage.revlog.issue6528.fix-incoming=no
596 adding changesets
596 adding changesets
597 adding manifests
597 adding manifests
598 adding file changes
598 adding file changes
599 added 8 changesets with 12 changes to 4 files
599 added 8 changesets with 12 changes to 4 files
600 new changesets f5a5a568022f:3beabb508514 (8 drafts)
600 new changesets f5a5a568022f:3beabb508514 (8 drafts)
601 (run 'hg update' to get a working copy)
601 (run 'hg update' to get a working copy)
602
602
603 Check that revision were not fixed on the fly
603 Check that revision were not fixed on the fly
604
604
605 $ hg debugrevlogindex b.txt
605 $ hg debugrevlogindex b.txt
606 rev linkrev nodeid p1 p2
606 rev linkrev nodeid p1 p2
607 0 2 05b806ebe5ea 000000000000 000000000000
607 0 2 05b806ebe5ea 000000000000 000000000000
608 1 3 a58b36ad6b65 05b806ebe5ea 000000000000
608 1 3 a58b36ad6b65 05b806ebe5ea 000000000000
609 2 6 216a5fe8b8ed 000000000000 000000000000
609 2 6 216a5fe8b8ed 000000000000 000000000000
610 3 7 ea4f2f2463cc 216a5fe8b8ed 000000000000
610 3 7 ea4f2f2463cc 216a5fe8b8ed 000000000000
611
611
612 $ hg debugrevlogindex D.txt
612 $ hg debugrevlogindex D.txt
613 rev linkrev nodeid p1 p2
613 rev linkrev nodeid p1 p2
614 0 6 2a8d3833f2fb 000000000000 000000000000
614 0 6 2a8d3833f2fb 000000000000 000000000000
615 1 7 2a80419dfc31 2a8d3833f2fb 000000000000
615 1 7 2a80419dfc31 2a8d3833f2fb 000000000000
616
616
617 That we do see the symptoms of the bug
617 That we do see the symptoms of the bug
618
618
619 $ hg up -- -1
619 $ hg up -- -1
620 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
620 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
621 $ hg status
621 $ hg status
622 M D.txt (?)
622 M D.txt (?)
623 M b.txt (?)
623 M b.txt (?)
624
624
625 And that the repair command find issue to fix.
625 And that the repair command find issue to fix.
626
626
627 $ hg debug-repair-issue6528 --dry-run
627 $ hg debug-repair-issue6528 --dry-run
628 found affected revision 1 for filelog 'data/D.txt.i'
628 found affected revision 1 for filelog 'data/D.txt.i'
629 found affected revision 1 for filelog 'data/b.txt.i'
629 found affected revision 1 for filelog 'data/b.txt.i'
630 found affected revision 3 for filelog 'data/b.txt.i'
630 found affected revision 3 for filelog 'data/b.txt.i'
631
631
632 $ cd ..
632 $ cd ..
General Comments 0
You need to be logged in to leave comments. Login now