rust-dirstate-v2: fix the unused bytes counter when rewriting the dirstate...
Raphaël Gomès
r50050:09984dc7 stable
@@ -1,844 +1,849 b''
1 //! The "version 2" disk representation of the dirstate
1 //! The "version 2" disk representation of the dirstate
2 //!
2 //!
3 //! See `mercurial/helptext/internals/dirstate-v2.txt`
3 //! See `mercurial/helptext/internals/dirstate-v2.txt`
4
4
5 use crate::dirstate::TruncatedTimestamp;
5 use crate::dirstate::TruncatedTimestamp;
6 use crate::dirstate_tree::dirstate_map::{self, DirstateMap, NodeRef};
6 use crate::dirstate_tree::dirstate_map::{self, DirstateMap, NodeRef};
7 use crate::dirstate_tree::path_with_basename::WithBasename;
7 use crate::dirstate_tree::path_with_basename::WithBasename;
8 use crate::errors::HgError;
8 use crate::errors::HgError;
9 use crate::utils::hg_path::HgPath;
9 use crate::utils::hg_path::HgPath;
10 use crate::DirstateEntry;
10 use crate::DirstateEntry;
11 use crate::DirstateError;
11 use crate::DirstateError;
12 use crate::DirstateParents;
12 use crate::DirstateParents;
13 use bitflags::bitflags;
13 use bitflags::bitflags;
14 use bytes_cast::unaligned::{U16Be, U32Be};
14 use bytes_cast::unaligned::{U16Be, U32Be};
15 use bytes_cast::BytesCast;
15 use bytes_cast::BytesCast;
16 use format_bytes::format_bytes;
16 use format_bytes::format_bytes;
17 use rand::Rng;
17 use rand::Rng;
18 use std::borrow::Cow;
18 use std::borrow::Cow;
19 use std::convert::{TryFrom, TryInto};
19 use std::convert::{TryFrom, TryInto};
20 use std::fmt::Write;
20 use std::fmt::Write;
21
21
22 /// Added at the start of `.hg/dirstate` when the "v2" format is used.
22 /// Added at the start of `.hg/dirstate` when the "v2" format is used.
23 /// This is a redundant sanity check more than an actual "magic number" since
23 /// This is a redundant sanity check more than an actual "magic number" since
24 /// `.hg/requires` already governs which format should be used.
24 /// `.hg/requires` already governs which format should be used.
25 pub const V2_FORMAT_MARKER: &[u8; 12] = b"dirstate-v2\n";
25 pub const V2_FORMAT_MARKER: &[u8; 12] = b"dirstate-v2\n";
26
26
27 /// Keep space for 256-bit hashes
27 /// Keep space for 256-bit hashes
28 const STORED_NODE_ID_BYTES: usize = 32;
28 const STORED_NODE_ID_BYTES: usize = 32;
29
29
30 /// … even though only 160 bits are used for now, with SHA-1
30 /// … even though only 160 bits are used for now, with SHA-1
31 const USED_NODE_ID_BYTES: usize = 20;
31 const USED_NODE_ID_BYTES: usize = 20;
32
32
33 pub(super) const IGNORE_PATTERNS_HASH_LEN: usize = 20;
33 pub(super) const IGNORE_PATTERNS_HASH_LEN: usize = 20;
34 pub(super) type IgnorePatternsHash = [u8; IGNORE_PATTERNS_HASH_LEN];
34 pub(super) type IgnorePatternsHash = [u8; IGNORE_PATTERNS_HASH_LEN];
35
35
36 /// Must match constants of the same names in `mercurial/dirstateutils/v2.py`
36 /// Must match constants of the same names in `mercurial/dirstateutils/v2.py`
37 const TREE_METADATA_SIZE: usize = 44;
37 const TREE_METADATA_SIZE: usize = 44;
38 const NODE_SIZE: usize = 44;
38 const NODE_SIZE: usize = 44;
39
39
40 /// Make sure that size-affecting changes are made knowingly
40 /// Make sure that size-affecting changes are made knowingly
41 #[allow(unused)]
41 #[allow(unused)]
42 fn static_assert_size_of() {
42 fn static_assert_size_of() {
43 let _ = std::mem::transmute::<TreeMetadata, [u8; TREE_METADATA_SIZE]>;
43 let _ = std::mem::transmute::<TreeMetadata, [u8; TREE_METADATA_SIZE]>;
44 let _ = std::mem::transmute::<DocketHeader, [u8; TREE_METADATA_SIZE + 81]>;
44 let _ = std::mem::transmute::<DocketHeader, [u8; TREE_METADATA_SIZE + 81]>;
45 let _ = std::mem::transmute::<Node, [u8; NODE_SIZE]>;
45 let _ = std::mem::transmute::<Node, [u8; NODE_SIZE]>;
46 }
46 }
47
47
48 // Must match `HEADER` in `mercurial/dirstateutils/docket.py`
48 // Must match `HEADER` in `mercurial/dirstateutils/docket.py`
49 #[derive(BytesCast)]
49 #[derive(BytesCast)]
50 #[repr(C)]
50 #[repr(C)]
51 struct DocketHeader {
51 struct DocketHeader {
52 marker: [u8; V2_FORMAT_MARKER.len()],
52 marker: [u8; V2_FORMAT_MARKER.len()],
53 parent_1: [u8; STORED_NODE_ID_BYTES],
53 parent_1: [u8; STORED_NODE_ID_BYTES],
54 parent_2: [u8; STORED_NODE_ID_BYTES],
54 parent_2: [u8; STORED_NODE_ID_BYTES],
55
55
56 metadata: TreeMetadata,
56 metadata: TreeMetadata,
57
57
58 /// Counted in bytes
58 /// Counted in bytes
59 data_size: Size,
59 data_size: Size,
60
60
61 uuid_size: u8,
61 uuid_size: u8,
62 }
62 }
63
63
64 pub struct Docket<'on_disk> {
64 pub struct Docket<'on_disk> {
65 header: &'on_disk DocketHeader,
65 header: &'on_disk DocketHeader,
66 pub uuid: &'on_disk [u8],
66 pub uuid: &'on_disk [u8],
67 }
67 }
68
68
69 /// Fields are documented in the *Tree metadata in the docket file*
69 /// Fields are documented in the *Tree metadata in the docket file*
70 /// section of `mercurial/helptext/internals/dirstate-v2.txt`
70 /// section of `mercurial/helptext/internals/dirstate-v2.txt`
71 #[derive(BytesCast)]
71 #[derive(BytesCast)]
72 #[repr(C)]
72 #[repr(C)]
73 pub struct TreeMetadata {
73 pub struct TreeMetadata {
74 root_nodes: ChildNodes,
74 root_nodes: ChildNodes,
75 nodes_with_entry_count: Size,
75 nodes_with_entry_count: Size,
76 nodes_with_copy_source_count: Size,
76 nodes_with_copy_source_count: Size,
77 unreachable_bytes: Size,
77 unreachable_bytes: Size,
78 unused: [u8; 4],
78 unused: [u8; 4],
79
79
80 /// See *Optional hash of ignore patterns* section of
80 /// See *Optional hash of ignore patterns* section of
81 /// `mercurial/helptext/internals/dirstate-v2.txt`
81 /// `mercurial/helptext/internals/dirstate-v2.txt`
82 ignore_patterns_hash: IgnorePatternsHash,
82 ignore_patterns_hash: IgnorePatternsHash,
83 }
83 }
84
84
85 /// Fields are documented in the *The data file format*
85 /// Fields are documented in the *The data file format*
86 /// section of `mercurial/helptext/internals/dirstate-v2.txt`
86 /// section of `mercurial/helptext/internals/dirstate-v2.txt`
87 #[derive(BytesCast)]
87 #[derive(BytesCast)]
88 #[repr(C)]
88 #[repr(C)]
89 pub(super) struct Node {
89 pub(super) struct Node {
90 full_path: PathSlice,
90 full_path: PathSlice,
91
91
92 /// In bytes from `self.full_path.start`
92 /// In bytes from `self.full_path.start`
93 base_name_start: PathSize,
93 base_name_start: PathSize,
94
94
95 copy_source: OptPathSlice,
95 copy_source: OptPathSlice,
96 children: ChildNodes,
96 children: ChildNodes,
97 pub(super) descendants_with_entry_count: Size,
97 pub(super) descendants_with_entry_count: Size,
98 pub(super) tracked_descendants_count: Size,
98 pub(super) tracked_descendants_count: Size,
99 flags: U16Be,
99 flags: U16Be,
100 size: U32Be,
100 size: U32Be,
101 mtime: PackedTruncatedTimestamp,
101 mtime: PackedTruncatedTimestamp,
102 }
102 }
103
103
104 bitflags! {
104 bitflags! {
105 #[repr(C)]
105 #[repr(C)]
106 struct Flags: u16 {
106 struct Flags: u16 {
107 const WDIR_TRACKED = 1 << 0;
107 const WDIR_TRACKED = 1 << 0;
108 const P1_TRACKED = 1 << 1;
108 const P1_TRACKED = 1 << 1;
109 const P2_INFO = 1 << 2;
109 const P2_INFO = 1 << 2;
110 const MODE_EXEC_PERM = 1 << 3;
110 const MODE_EXEC_PERM = 1 << 3;
111 const MODE_IS_SYMLINK = 1 << 4;
111 const MODE_IS_SYMLINK = 1 << 4;
112 const HAS_FALLBACK_EXEC = 1 << 5;
112 const HAS_FALLBACK_EXEC = 1 << 5;
113 const FALLBACK_EXEC = 1 << 6;
113 const FALLBACK_EXEC = 1 << 6;
114 const HAS_FALLBACK_SYMLINK = 1 << 7;
114 const HAS_FALLBACK_SYMLINK = 1 << 7;
115 const FALLBACK_SYMLINK = 1 << 8;
115 const FALLBACK_SYMLINK = 1 << 8;
116 const EXPECTED_STATE_IS_MODIFIED = 1 << 9;
116 const EXPECTED_STATE_IS_MODIFIED = 1 << 9;
117 const HAS_MODE_AND_SIZE = 1 <<10;
117 const HAS_MODE_AND_SIZE = 1 <<10;
118 const HAS_MTIME = 1 <<11;
118 const HAS_MTIME = 1 <<11;
119 const MTIME_SECOND_AMBIGUOUS = 1 << 12;
119 const MTIME_SECOND_AMBIGUOUS = 1 << 12;
120 const DIRECTORY = 1 <<13;
120 const DIRECTORY = 1 <<13;
121 const ALL_UNKNOWN_RECORDED = 1 <<14;
121 const ALL_UNKNOWN_RECORDED = 1 <<14;
122 const ALL_IGNORED_RECORDED = 1 <<15;
122 const ALL_IGNORED_RECORDED = 1 <<15;
123 }
123 }
124 }
124 }
125
125
126 /// Duration since the Unix epoch
126 /// Duration since the Unix epoch
127 #[derive(BytesCast, Copy, Clone)]
127 #[derive(BytesCast, Copy, Clone)]
128 #[repr(C)]
128 #[repr(C)]
129 struct PackedTruncatedTimestamp {
129 struct PackedTruncatedTimestamp {
130 truncated_seconds: U32Be,
130 truncated_seconds: U32Be,
131 nanoseconds: U32Be,
131 nanoseconds: U32Be,
132 }
132 }
133
133
134 /// Counted in bytes from the start of the file
134 /// Counted in bytes from the start of the file
135 ///
135 ///
136 /// NOTE: not supporting `.hg/dirstate` files larger than 4 GiB.
136 /// NOTE: not supporting `.hg/dirstate` files larger than 4 GiB.
137 type Offset = U32Be;
137 type Offset = U32Be;
138
138
139 /// Counted in number of items
139 /// Counted in number of items
140 ///
140 ///
141 /// NOTE: we choose not to support counting more than 4 billion nodes anywhere.
141 /// NOTE: we choose not to support counting more than 4 billion nodes anywhere.
142 type Size = U32Be;
142 type Size = U32Be;
143
143
144 /// Counted in bytes
144 /// Counted in bytes
145 ///
145 ///
146 /// NOTE: we choose not to support file names/paths longer than 64 KiB.
146 /// NOTE: we choose not to support file names/paths longer than 64 KiB.
147 type PathSize = U16Be;
147 type PathSize = U16Be;
148
148
149 /// A contiguous sequence of `len` times `Node`, representing the child nodes
149 /// A contiguous sequence of `len` times `Node`, representing the child nodes
150 /// of either some other node or of the repository root.
150 /// of either some other node or of the repository root.
151 ///
151 ///
152 /// Always sorted by ascending `full_path`, to allow binary search.
152 /// Always sorted by ascending `full_path`, to allow binary search.
153 /// Since nodes with the same parent nodes also have the same parent path,
153 /// Since nodes with the same parent node also have the same parent path,
153 /// Since nodes with the same parent node also have the same parent path,
154 /// only the `base_name`s need to be compared during binary search.
155 #[derive(BytesCast, Copy, Clone)]
155 #[derive(BytesCast, Copy, Clone)]
156 #[repr(C)]
156 #[repr(C)]
157 struct ChildNodes {
157 struct ChildNodes {
158 start: Offset,
158 start: Offset,
159 len: Size,
159 len: Size,
160 }
160 }
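
The comment on `ChildNodes` above explains why children are stored sorted by `full_path`: siblings all share their parent path, so a lookup only needs to compare base names. A minimal standalone sketch of that idea, using hypothetical names (`DemoNode`, `find_child`) rather than the real Mercurial types:

struct DemoNode {
    full_path: String,
    base_name_start: usize,
}

impl DemoNode {
    fn base_name(&self) -> &str {
        &self.full_path[self.base_name_start..]
    }
}

/// Sketch: because siblings share the parent path ("dir/" below) and are
/// sorted, binary search can compare only the base names.
fn find_child<'a>(children: &'a [DemoNode], base_name: &str) -> Option<&'a DemoNode> {
    children
        .binary_search_by(|node| node.base_name().cmp(base_name))
        .ok()
        .map(|index| &children[index])
}

fn main() {
    let children = vec![
        DemoNode { full_path: "dir/a".into(), base_name_start: 4 },
        DemoNode { full_path: "dir/b".into(), base_name_start: 4 },
        DemoNode { full_path: "dir/c".into(), base_name_start: 4 },
    ];
    assert!(find_child(&children, "b").is_some());
    assert!(find_child(&children, "z").is_none());
}
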
161
161
162 /// A `HgPath` of `len` bytes
162 /// A `HgPath` of `len` bytes
163 #[derive(BytesCast, Copy, Clone)]
163 #[derive(BytesCast, Copy, Clone)]
164 #[repr(C)]
164 #[repr(C)]
165 struct PathSlice {
165 struct PathSlice {
166 start: Offset,
166 start: Offset,
167 len: PathSize,
167 len: PathSize,
168 }
168 }
169
169
170 /// Either nothing if `start == 0`, or a `HgPath` of `len` bytes
170 /// Either nothing if `start == 0`, or a `HgPath` of `len` bytes
171 type OptPathSlice = PathSlice;
171 type OptPathSlice = PathSlice;
172
172
173 /// Unexpected file format found in `.hg/dirstate` with the "v2" format.
173 /// Unexpected file format found in `.hg/dirstate` with the "v2" format.
174 ///
174 ///
175 /// This should only happen if Mercurial is buggy or a repository is corrupted.
175 /// This should only happen if Mercurial is buggy or a repository is corrupted.
176 #[derive(Debug)]
176 #[derive(Debug)]
177 pub struct DirstateV2ParseError;
177 pub struct DirstateV2ParseError;
178
178
179 impl From<DirstateV2ParseError> for HgError {
179 impl From<DirstateV2ParseError> for HgError {
180 fn from(_: DirstateV2ParseError) -> Self {
180 fn from(_: DirstateV2ParseError) -> Self {
181 HgError::corrupted("dirstate-v2 parse error")
181 HgError::corrupted("dirstate-v2 parse error")
182 }
182 }
183 }
183 }
184
184
185 impl From<DirstateV2ParseError> for crate::DirstateError {
185 impl From<DirstateV2ParseError> for crate::DirstateError {
186 fn from(error: DirstateV2ParseError) -> Self {
186 fn from(error: DirstateV2ParseError) -> Self {
187 HgError::from(error).into()
187 HgError::from(error).into()
188 }
188 }
189 }
189 }
190
190
191 impl TreeMetadata {
191 impl TreeMetadata {
192 pub fn as_bytes(&self) -> &[u8] {
192 pub fn as_bytes(&self) -> &[u8] {
193 BytesCast::as_bytes(self)
193 BytesCast::as_bytes(self)
194 }
194 }
195 }
195 }
196
196
197 impl<'on_disk> Docket<'on_disk> {
197 impl<'on_disk> Docket<'on_disk> {
198 /// Generate the identifier for a new data file
198 /// Generate the identifier for a new data file
199 ///
199 ///
200 /// TODO: support the `HGTEST_UUIDFILE` environment variable.
200 /// TODO: support the `HGTEST_UUIDFILE` environment variable.
201 /// See `mercurial/revlogutils/docket.py`
201 /// See `mercurial/revlogutils/docket.py`
202 pub fn new_uid() -> String {
202 pub fn new_uid() -> String {
203 const ID_LENGTH: usize = 8;
203 const ID_LENGTH: usize = 8;
204 let mut id = String::with_capacity(ID_LENGTH);
204 let mut id = String::with_capacity(ID_LENGTH);
205 let mut rng = rand::thread_rng();
205 let mut rng = rand::thread_rng();
206 for _ in 0..ID_LENGTH {
206 for _ in 0..ID_LENGTH {
207 // One random hexadecimal digit.
207 // One random hexadecimal digit.
208 // `unwrap` never panics because `impl Write for String`
208 // `unwrap` never panics because `impl Write for String`
209 // never returns an error.
209 // never returns an error.
210 write!(&mut id, "{:x}", rng.gen_range(0..16)).unwrap();
210 write!(&mut id, "{:x}", rng.gen_range(0..16)).unwrap();
211 }
211 }
212 id
212 id
213 }
213 }
214
214
215 pub fn serialize(
215 pub fn serialize(
216 parents: DirstateParents,
216 parents: DirstateParents,
217 tree_metadata: TreeMetadata,
217 tree_metadata: TreeMetadata,
218 data_size: u64,
218 data_size: u64,
219 uuid: &[u8],
219 uuid: &[u8],
220 ) -> Result<Vec<u8>, std::num::TryFromIntError> {
220 ) -> Result<Vec<u8>, std::num::TryFromIntError> {
221 let header = DocketHeader {
221 let header = DocketHeader {
222 marker: *V2_FORMAT_MARKER,
222 marker: *V2_FORMAT_MARKER,
223 parent_1: parents.p1.pad_to_256_bits(),
223 parent_1: parents.p1.pad_to_256_bits(),
224 parent_2: parents.p2.pad_to_256_bits(),
224 parent_2: parents.p2.pad_to_256_bits(),
225 metadata: tree_metadata,
225 metadata: tree_metadata,
226 data_size: u32::try_from(data_size)?.into(),
226 data_size: u32::try_from(data_size)?.into(),
227 uuid_size: uuid.len().try_into()?,
227 uuid_size: uuid.len().try_into()?,
228 };
228 };
229 let header = header.as_bytes();
229 let header = header.as_bytes();
230 let mut docket = Vec::with_capacity(header.len() + uuid.len());
230 let mut docket = Vec::with_capacity(header.len() + uuid.len());
231 docket.extend_from_slice(header);
231 docket.extend_from_slice(header);
232 docket.extend_from_slice(uuid);
232 docket.extend_from_slice(uuid);
233 Ok(docket)
233 Ok(docket)
234 }
234 }
235
235
236 pub fn parents(&self) -> DirstateParents {
236 pub fn parents(&self) -> DirstateParents {
237 use crate::Node;
237 use crate::Node;
238 let p1 = Node::try_from(&self.header.parent_1[..USED_NODE_ID_BYTES])
238 let p1 = Node::try_from(&self.header.parent_1[..USED_NODE_ID_BYTES])
239 .unwrap()
239 .unwrap()
240 .clone();
240 .clone();
241 let p2 = Node::try_from(&self.header.parent_2[..USED_NODE_ID_BYTES])
241 let p2 = Node::try_from(&self.header.parent_2[..USED_NODE_ID_BYTES])
242 .unwrap()
242 .unwrap()
243 .clone();
243 .clone();
244 DirstateParents { p1, p2 }
244 DirstateParents { p1, p2 }
245 }
245 }
246
246
247 pub fn tree_metadata(&self) -> &[u8] {
247 pub fn tree_metadata(&self) -> &[u8] {
248 self.header.metadata.as_bytes()
248 self.header.metadata.as_bytes()
249 }
249 }
250
250
251 pub fn data_size(&self) -> usize {
251 pub fn data_size(&self) -> usize {
252 // This `unwrap` could only panic on a 16-bit CPU
252 // This `unwrap` could only panic on a 16-bit CPU
253 self.header.data_size.get().try_into().unwrap()
253 self.header.data_size.get().try_into().unwrap()
254 }
254 }
255
255
256 pub fn data_filename(&self) -> String {
256 pub fn data_filename(&self) -> String {
257 String::from_utf8(format_bytes!(b"dirstate.{}", self.uuid)).unwrap()
257 String::from_utf8(format_bytes!(b"dirstate.{}", self.uuid)).unwrap()
258 }
258 }
259 }
259 }
260
260
261 pub fn read_docket(
261 pub fn read_docket(
262 on_disk: &[u8],
262 on_disk: &[u8],
263 ) -> Result<Docket<'_>, DirstateV2ParseError> {
263 ) -> Result<Docket<'_>, DirstateV2ParseError> {
264 let (header, uuid) =
264 let (header, uuid) =
265 DocketHeader::from_bytes(on_disk).map_err(|_| DirstateV2ParseError)?;
265 DocketHeader::from_bytes(on_disk).map_err(|_| DirstateV2ParseError)?;
266 let uuid_size = header.uuid_size as usize;
266 let uuid_size = header.uuid_size as usize;
267 if header.marker == *V2_FORMAT_MARKER && uuid.len() == uuid_size {
267 if header.marker == *V2_FORMAT_MARKER && uuid.len() == uuid_size {
268 Ok(Docket { header, uuid })
268 Ok(Docket { header, uuid })
269 } else {
269 } else {
270 Err(DirstateV2ParseError)
270 Err(DirstateV2ParseError)
271 }
271 }
272 }
272 }
273
273
274 pub(super) fn read<'on_disk>(
274 pub(super) fn read<'on_disk>(
275 on_disk: &'on_disk [u8],
275 on_disk: &'on_disk [u8],
276 metadata: &[u8],
276 metadata: &[u8],
277 ) -> Result<DirstateMap<'on_disk>, DirstateV2ParseError> {
277 ) -> Result<DirstateMap<'on_disk>, DirstateV2ParseError> {
278 if on_disk.is_empty() {
278 if on_disk.is_empty() {
279 return Ok(DirstateMap::empty(on_disk));
279 return Ok(DirstateMap::empty(on_disk));
280 }
280 }
281 let (meta, _) = TreeMetadata::from_bytes(metadata)
281 let (meta, _) = TreeMetadata::from_bytes(metadata)
282 .map_err(|_| DirstateV2ParseError)?;
282 .map_err(|_| DirstateV2ParseError)?;
283 let dirstate_map = DirstateMap {
283 let dirstate_map = DirstateMap {
284 on_disk,
284 on_disk,
285 root: dirstate_map::ChildNodes::OnDisk(read_nodes(
285 root: dirstate_map::ChildNodes::OnDisk(read_nodes(
286 on_disk,
286 on_disk,
287 meta.root_nodes,
287 meta.root_nodes,
288 )?),
288 )?),
289 nodes_with_entry_count: meta.nodes_with_entry_count.get(),
289 nodes_with_entry_count: meta.nodes_with_entry_count.get(),
290 nodes_with_copy_source_count: meta.nodes_with_copy_source_count.get(),
290 nodes_with_copy_source_count: meta.nodes_with_copy_source_count.get(),
291 ignore_patterns_hash: meta.ignore_patterns_hash,
291 ignore_patterns_hash: meta.ignore_patterns_hash,
292 unreachable_bytes: meta.unreachable_bytes.get(),
292 unreachable_bytes: meta.unreachable_bytes.get(),
293 old_data_size: on_disk.len(),
293 old_data_size: on_disk.len(),
294 };
294 };
295 Ok(dirstate_map)
295 Ok(dirstate_map)
296 }
296 }
297
297
298 impl Node {
298 impl Node {
299 pub(super) fn full_path<'on_disk>(
299 pub(super) fn full_path<'on_disk>(
300 &self,
300 &self,
301 on_disk: &'on_disk [u8],
301 on_disk: &'on_disk [u8],
302 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
302 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
303 read_hg_path(on_disk, self.full_path)
303 read_hg_path(on_disk, self.full_path)
304 }
304 }
305
305
306 pub(super) fn base_name_start<'on_disk>(
306 pub(super) fn base_name_start<'on_disk>(
307 &self,
307 &self,
308 ) -> Result<usize, DirstateV2ParseError> {
308 ) -> Result<usize, DirstateV2ParseError> {
309 let start = self.base_name_start.get();
309 let start = self.base_name_start.get();
310 if start < self.full_path.len.get() {
310 if start < self.full_path.len.get() {
311 let start = usize::try_from(start)
311 let start = usize::try_from(start)
312 // u32 -> usize, could only panic on a 16-bit CPU
312 // u32 -> usize, could only panic on a 16-bit CPU
313 .expect("dirstate-v2 base_name_start out of bounds");
313 .expect("dirstate-v2 base_name_start out of bounds");
314 Ok(start)
314 Ok(start)
315 } else {
315 } else {
316 Err(DirstateV2ParseError)
316 Err(DirstateV2ParseError)
317 }
317 }
318 }
318 }
319
319
320 pub(super) fn base_name<'on_disk>(
320 pub(super) fn base_name<'on_disk>(
321 &self,
321 &self,
322 on_disk: &'on_disk [u8],
322 on_disk: &'on_disk [u8],
323 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
323 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
324 let full_path = self.full_path(on_disk)?;
324 let full_path = self.full_path(on_disk)?;
325 let base_name_start = self.base_name_start()?;
325 let base_name_start = self.base_name_start()?;
326 Ok(HgPath::new(&full_path.as_bytes()[base_name_start..]))
326 Ok(HgPath::new(&full_path.as_bytes()[base_name_start..]))
327 }
327 }
328
328
329 pub(super) fn path<'on_disk>(
329 pub(super) fn path<'on_disk>(
330 &self,
330 &self,
331 on_disk: &'on_disk [u8],
331 on_disk: &'on_disk [u8],
332 ) -> Result<dirstate_map::NodeKey<'on_disk>, DirstateV2ParseError> {
332 ) -> Result<dirstate_map::NodeKey<'on_disk>, DirstateV2ParseError> {
333 Ok(WithBasename::from_raw_parts(
333 Ok(WithBasename::from_raw_parts(
334 Cow::Borrowed(self.full_path(on_disk)?),
334 Cow::Borrowed(self.full_path(on_disk)?),
335 self.base_name_start()?,
335 self.base_name_start()?,
336 ))
336 ))
337 }
337 }
338
338
339 pub(super) fn has_copy_source<'on_disk>(&self) -> bool {
339 pub(super) fn has_copy_source<'on_disk>(&self) -> bool {
340 self.copy_source.start.get() != 0
340 self.copy_source.start.get() != 0
341 }
341 }
342
342
343 pub(super) fn copy_source<'on_disk>(
343 pub(super) fn copy_source<'on_disk>(
344 &self,
344 &self,
345 on_disk: &'on_disk [u8],
345 on_disk: &'on_disk [u8],
346 ) -> Result<Option<&'on_disk HgPath>, DirstateV2ParseError> {
346 ) -> Result<Option<&'on_disk HgPath>, DirstateV2ParseError> {
347 Ok(if self.has_copy_source() {
347 Ok(if self.has_copy_source() {
348 Some(read_hg_path(on_disk, self.copy_source)?)
348 Some(read_hg_path(on_disk, self.copy_source)?)
349 } else {
349 } else {
350 None
350 None
351 })
351 })
352 }
352 }
353
353
354 fn flags(&self) -> Flags {
354 fn flags(&self) -> Flags {
355 Flags::from_bits_truncate(self.flags.get())
355 Flags::from_bits_truncate(self.flags.get())
356 }
356 }
357
357
358 fn has_entry(&self) -> bool {
358 fn has_entry(&self) -> bool {
359 self.flags().intersects(
359 self.flags().intersects(
360 Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
360 Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
361 )
361 )
362 }
362 }
363
363
364 pub(super) fn node_data(
364 pub(super) fn node_data(
365 &self,
365 &self,
366 ) -> Result<dirstate_map::NodeData, DirstateV2ParseError> {
366 ) -> Result<dirstate_map::NodeData, DirstateV2ParseError> {
367 if self.has_entry() {
367 if self.has_entry() {
368 Ok(dirstate_map::NodeData::Entry(self.assume_entry()?))
368 Ok(dirstate_map::NodeData::Entry(self.assume_entry()?))
369 } else if let Some(mtime) = self.cached_directory_mtime()? {
369 } else if let Some(mtime) = self.cached_directory_mtime()? {
370 Ok(dirstate_map::NodeData::CachedDirectory { mtime })
370 Ok(dirstate_map::NodeData::CachedDirectory { mtime })
371 } else {
371 } else {
372 Ok(dirstate_map::NodeData::None)
372 Ok(dirstate_map::NodeData::None)
373 }
373 }
374 }
374 }
375
375
376 pub(super) fn cached_directory_mtime(
376 pub(super) fn cached_directory_mtime(
377 &self,
377 &self,
378 ) -> Result<Option<TruncatedTimestamp>, DirstateV2ParseError> {
378 ) -> Result<Option<TruncatedTimestamp>, DirstateV2ParseError> {
379 // For now we do not have code to handle the absence of
379 // For now we do not have code to handle the absence of
380 // ALL_UNKNOWN_RECORDED, so we ignore the mtime if the flag is
380 // ALL_UNKNOWN_RECORDED, so we ignore the mtime if the flag is
381 // unset.
381 // unset.
382 if self.flags().contains(Flags::DIRECTORY)
382 if self.flags().contains(Flags::DIRECTORY)
383 && self.flags().contains(Flags::HAS_MTIME)
383 && self.flags().contains(Flags::HAS_MTIME)
384 && self.flags().contains(Flags::ALL_UNKNOWN_RECORDED)
384 && self.flags().contains(Flags::ALL_UNKNOWN_RECORDED)
385 {
385 {
386 Ok(Some(self.mtime()?))
386 Ok(Some(self.mtime()?))
387 } else {
387 } else {
388 Ok(None)
388 Ok(None)
389 }
389 }
390 }
390 }
391
391
392 fn synthesize_unix_mode(&self) -> u32 {
392 fn synthesize_unix_mode(&self) -> u32 {
393 let file_type = if self.flags().contains(Flags::MODE_IS_SYMLINK) {
393 let file_type = if self.flags().contains(Flags::MODE_IS_SYMLINK) {
394 libc::S_IFLNK
394 libc::S_IFLNK
395 } else {
395 } else {
396 libc::S_IFREG
396 libc::S_IFREG
397 };
397 };
398 let permissions = if self.flags().contains(Flags::MODE_EXEC_PERM) {
398 let permissions = if self.flags().contains(Flags::MODE_EXEC_PERM) {
399 0o755
399 0o755
400 } else {
400 } else {
401 0o644
401 0o644
402 };
402 };
403 (file_type | permissions).into()
403 (file_type | permissions).into()
404 }
404 }
405
405
406 fn mtime(&self) -> Result<TruncatedTimestamp, DirstateV2ParseError> {
406 fn mtime(&self) -> Result<TruncatedTimestamp, DirstateV2ParseError> {
407 let mut m: TruncatedTimestamp = self.mtime.try_into()?;
407 let mut m: TruncatedTimestamp = self.mtime.try_into()?;
408 if self.flags().contains(Flags::MTIME_SECOND_AMBIGUOUS) {
408 if self.flags().contains(Flags::MTIME_SECOND_AMBIGUOUS) {
409 m.second_ambiguous = true;
409 m.second_ambiguous = true;
410 }
410 }
411 Ok(m)
411 Ok(m)
412 }
412 }
413
413
414 fn assume_entry(&self) -> Result<DirstateEntry, DirstateV2ParseError> {
414 fn assume_entry(&self) -> Result<DirstateEntry, DirstateV2ParseError> {
415 // TODO: convert through raw bits instead?
415 // TODO: convert through raw bits instead?
416 let wdir_tracked = self.flags().contains(Flags::WDIR_TRACKED);
416 let wdir_tracked = self.flags().contains(Flags::WDIR_TRACKED);
417 let p1_tracked = self.flags().contains(Flags::P1_TRACKED);
417 let p1_tracked = self.flags().contains(Flags::P1_TRACKED);
418 let p2_info = self.flags().contains(Flags::P2_INFO);
418 let p2_info = self.flags().contains(Flags::P2_INFO);
419 let mode_size = if self.flags().contains(Flags::HAS_MODE_AND_SIZE)
419 let mode_size = if self.flags().contains(Flags::HAS_MODE_AND_SIZE)
420 && !self.flags().contains(Flags::EXPECTED_STATE_IS_MODIFIED)
420 && !self.flags().contains(Flags::EXPECTED_STATE_IS_MODIFIED)
421 {
421 {
422 Some((self.synthesize_unix_mode(), self.size.into()))
422 Some((self.synthesize_unix_mode(), self.size.into()))
423 } else {
423 } else {
424 None
424 None
425 };
425 };
426 let mtime = if self.flags().contains(Flags::HAS_MTIME)
426 let mtime = if self.flags().contains(Flags::HAS_MTIME)
427 && !self.flags().contains(Flags::DIRECTORY)
427 && !self.flags().contains(Flags::DIRECTORY)
428 && !self.flags().contains(Flags::EXPECTED_STATE_IS_MODIFIED)
428 && !self.flags().contains(Flags::EXPECTED_STATE_IS_MODIFIED)
429 {
429 {
430 Some(self.mtime()?)
430 Some(self.mtime()?)
431 } else {
431 } else {
432 None
432 None
433 };
433 };
434 let fallback_exec = if self.flags().contains(Flags::HAS_FALLBACK_EXEC)
434 let fallback_exec = if self.flags().contains(Flags::HAS_FALLBACK_EXEC)
435 {
435 {
436 Some(self.flags().contains(Flags::FALLBACK_EXEC))
436 Some(self.flags().contains(Flags::FALLBACK_EXEC))
437 } else {
437 } else {
438 None
438 None
439 };
439 };
440 let fallback_symlink =
440 let fallback_symlink =
441 if self.flags().contains(Flags::HAS_FALLBACK_SYMLINK) {
441 if self.flags().contains(Flags::HAS_FALLBACK_SYMLINK) {
442 Some(self.flags().contains(Flags::FALLBACK_SYMLINK))
442 Some(self.flags().contains(Flags::FALLBACK_SYMLINK))
443 } else {
443 } else {
444 None
444 None
445 };
445 };
446 Ok(DirstateEntry::from_v2_data(
446 Ok(DirstateEntry::from_v2_data(
447 wdir_tracked,
447 wdir_tracked,
448 p1_tracked,
448 p1_tracked,
449 p2_info,
449 p2_info,
450 mode_size,
450 mode_size,
451 mtime,
451 mtime,
452 fallback_exec,
452 fallback_exec,
453 fallback_symlink,
453 fallback_symlink,
454 ))
454 ))
455 }
455 }
456
456
457 pub(super) fn entry(
457 pub(super) fn entry(
458 &self,
458 &self,
459 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
459 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
460 if self.has_entry() {
460 if self.has_entry() {
461 Ok(Some(self.assume_entry()?))
461 Ok(Some(self.assume_entry()?))
462 } else {
462 } else {
463 Ok(None)
463 Ok(None)
464 }
464 }
465 }
465 }
466
466
467 pub(super) fn children<'on_disk>(
467 pub(super) fn children<'on_disk>(
468 &self,
468 &self,
469 on_disk: &'on_disk [u8],
469 on_disk: &'on_disk [u8],
470 ) -> Result<&'on_disk [Node], DirstateV2ParseError> {
470 ) -> Result<&'on_disk [Node], DirstateV2ParseError> {
471 read_nodes(on_disk, self.children)
471 read_nodes(on_disk, self.children)
472 }
472 }
473
473
474 pub(super) fn to_in_memory_node<'on_disk>(
474 pub(super) fn to_in_memory_node<'on_disk>(
475 &self,
475 &self,
476 on_disk: &'on_disk [u8],
476 on_disk: &'on_disk [u8],
477 ) -> Result<dirstate_map::Node<'on_disk>, DirstateV2ParseError> {
477 ) -> Result<dirstate_map::Node<'on_disk>, DirstateV2ParseError> {
478 Ok(dirstate_map::Node {
478 Ok(dirstate_map::Node {
479 children: dirstate_map::ChildNodes::OnDisk(
479 children: dirstate_map::ChildNodes::OnDisk(
480 self.children(on_disk)?,
480 self.children(on_disk)?,
481 ),
481 ),
482 copy_source: self.copy_source(on_disk)?.map(Cow::Borrowed),
482 copy_source: self.copy_source(on_disk)?.map(Cow::Borrowed),
483 data: self.node_data()?,
483 data: self.node_data()?,
484 descendants_with_entry_count: self
484 descendants_with_entry_count: self
485 .descendants_with_entry_count
485 .descendants_with_entry_count
486 .get(),
486 .get(),
487 tracked_descendants_count: self.tracked_descendants_count.get(),
487 tracked_descendants_count: self.tracked_descendants_count.get(),
488 })
488 })
489 }
489 }
490
490
491 fn from_dirstate_entry(
491 fn from_dirstate_entry(
492 entry: &DirstateEntry,
492 entry: &DirstateEntry,
493 ) -> (Flags, U32Be, PackedTruncatedTimestamp) {
493 ) -> (Flags, U32Be, PackedTruncatedTimestamp) {
494 let (
494 let (
495 wdir_tracked,
495 wdir_tracked,
496 p1_tracked,
496 p1_tracked,
497 p2_info,
497 p2_info,
498 mode_size_opt,
498 mode_size_opt,
499 mtime_opt,
499 mtime_opt,
500 fallback_exec,
500 fallback_exec,
501 fallback_symlink,
501 fallback_symlink,
502 ) = entry.v2_data();
502 ) = entry.v2_data();
503 // TODO: convert through raw flag bits instead?
503 // TODO: convert through raw flag bits instead?
504 let mut flags = Flags::empty();
504 let mut flags = Flags::empty();
505 flags.set(Flags::WDIR_TRACKED, wdir_tracked);
505 flags.set(Flags::WDIR_TRACKED, wdir_tracked);
506 flags.set(Flags::P1_TRACKED, p1_tracked);
506 flags.set(Flags::P1_TRACKED, p1_tracked);
507 flags.set(Flags::P2_INFO, p2_info);
507 flags.set(Flags::P2_INFO, p2_info);
508 let size = if let Some((m, s)) = mode_size_opt {
508 let size = if let Some((m, s)) = mode_size_opt {
509 let exec_perm = m & (libc::S_IXUSR as u32) != 0;
509 let exec_perm = m & (libc::S_IXUSR as u32) != 0;
510 let is_symlink = m & (libc::S_IFMT as u32) == libc::S_IFLNK as u32;
510 let is_symlink = m & (libc::S_IFMT as u32) == libc::S_IFLNK as u32;
511 flags.set(Flags::MODE_EXEC_PERM, exec_perm);
511 flags.set(Flags::MODE_EXEC_PERM, exec_perm);
512 flags.set(Flags::MODE_IS_SYMLINK, is_symlink);
512 flags.set(Flags::MODE_IS_SYMLINK, is_symlink);
513 flags.insert(Flags::HAS_MODE_AND_SIZE);
513 flags.insert(Flags::HAS_MODE_AND_SIZE);
514 s.into()
514 s.into()
515 } else {
515 } else {
516 0.into()
516 0.into()
517 };
517 };
518 let mtime = if let Some(m) = mtime_opt {
518 let mtime = if let Some(m) = mtime_opt {
519 flags.insert(Flags::HAS_MTIME);
519 flags.insert(Flags::HAS_MTIME);
520 if m.second_ambiguous {
520 if m.second_ambiguous {
521 flags.insert(Flags::MTIME_SECOND_AMBIGUOUS);
521 flags.insert(Flags::MTIME_SECOND_AMBIGUOUS);
522 };
522 };
523 m.into()
523 m.into()
524 } else {
524 } else {
525 PackedTruncatedTimestamp::null()
525 PackedTruncatedTimestamp::null()
526 };
526 };
527 if let Some(f_exec) = fallback_exec {
527 if let Some(f_exec) = fallback_exec {
528 flags.insert(Flags::HAS_FALLBACK_EXEC);
528 flags.insert(Flags::HAS_FALLBACK_EXEC);
529 if f_exec {
529 if f_exec {
530 flags.insert(Flags::FALLBACK_EXEC);
530 flags.insert(Flags::FALLBACK_EXEC);
531 }
531 }
532 }
532 }
533 if let Some(f_symlink) = fallback_symlink {
533 if let Some(f_symlink) = fallback_symlink {
534 flags.insert(Flags::HAS_FALLBACK_SYMLINK);
534 flags.insert(Flags::HAS_FALLBACK_SYMLINK);
535 if f_symlink {
535 if f_symlink {
536 flags.insert(Flags::FALLBACK_SYMLINK);
536 flags.insert(Flags::FALLBACK_SYMLINK);
537 }
537 }
538 }
538 }
539 (flags, size, mtime)
539 (flags, size, mtime)
540 }
540 }
541 }
541 }
542
542
543 fn read_hg_path(
543 fn read_hg_path(
544 on_disk: &[u8],
544 on_disk: &[u8],
545 slice: PathSlice,
545 slice: PathSlice,
546 ) -> Result<&HgPath, DirstateV2ParseError> {
546 ) -> Result<&HgPath, DirstateV2ParseError> {
547 read_slice(on_disk, slice.start, slice.len.get()).map(HgPath::new)
547 read_slice(on_disk, slice.start, slice.len.get()).map(HgPath::new)
548 }
548 }
549
549
550 fn read_nodes(
550 fn read_nodes(
551 on_disk: &[u8],
551 on_disk: &[u8],
552 slice: ChildNodes,
552 slice: ChildNodes,
553 ) -> Result<&[Node], DirstateV2ParseError> {
553 ) -> Result<&[Node], DirstateV2ParseError> {
554 read_slice(on_disk, slice.start, slice.len.get())
554 read_slice(on_disk, slice.start, slice.len.get())
555 }
555 }
556
556
557 fn read_slice<T, Len>(
557 fn read_slice<T, Len>(
558 on_disk: &[u8],
558 on_disk: &[u8],
559 start: Offset,
559 start: Offset,
560 len: Len,
560 len: Len,
561 ) -> Result<&[T], DirstateV2ParseError>
561 ) -> Result<&[T], DirstateV2ParseError>
562 where
562 where
563 T: BytesCast,
563 T: BytesCast,
564 Len: TryInto<usize>,
564 Len: TryInto<usize>,
565 {
565 {
566 // Saturating either value to `usize::MAX` would result in an "out of bounds"
566 // Saturating either value to `usize::MAX` would result in an "out of bounds"
567 // error, since a single `&[u8]` cannot occupy the entire address space.
567 // error, since a single `&[u8]` cannot occupy the entire address space.
568 let start = start.get().try_into().unwrap_or(std::usize::MAX);
568 let start = start.get().try_into().unwrap_or(std::usize::MAX);
569 let len = len.try_into().unwrap_or(std::usize::MAX);
569 let len = len.try_into().unwrap_or(std::usize::MAX);
570 on_disk
570 on_disk
571 .get(start..)
571 .get(start..)
572 .and_then(|bytes| T::slice_from_bytes(bytes, len).ok())
572 .and_then(|bytes| T::slice_from_bytes(bytes, len).ok())
573 .map(|(slice, _rest)| slice)
573 .map(|(slice, _rest)| slice)
574 .ok_or_else(|| DirstateV2ParseError)
574 .ok_or_else(|| DirstateV2ParseError)
575 }
575 }
576
576
577 pub(crate) fn for_each_tracked_path<'on_disk>(
577 pub(crate) fn for_each_tracked_path<'on_disk>(
578 on_disk: &'on_disk [u8],
578 on_disk: &'on_disk [u8],
579 metadata: &[u8],
579 metadata: &[u8],
580 mut f: impl FnMut(&'on_disk HgPath),
580 mut f: impl FnMut(&'on_disk HgPath),
581 ) -> Result<(), DirstateV2ParseError> {
581 ) -> Result<(), DirstateV2ParseError> {
582 let (meta, _) = TreeMetadata::from_bytes(metadata)
582 let (meta, _) = TreeMetadata::from_bytes(metadata)
583 .map_err(|_| DirstateV2ParseError)?;
583 .map_err(|_| DirstateV2ParseError)?;
584 fn recur<'on_disk>(
584 fn recur<'on_disk>(
585 on_disk: &'on_disk [u8],
585 on_disk: &'on_disk [u8],
586 nodes: ChildNodes,
586 nodes: ChildNodes,
587 f: &mut impl FnMut(&'on_disk HgPath),
587 f: &mut impl FnMut(&'on_disk HgPath),
588 ) -> Result<(), DirstateV2ParseError> {
588 ) -> Result<(), DirstateV2ParseError> {
589 for node in read_nodes(on_disk, nodes)? {
589 for node in read_nodes(on_disk, nodes)? {
590 if let Some(entry) = node.entry()? {
590 if let Some(entry) = node.entry()? {
591 if entry.state().is_tracked() {
591 if entry.state().is_tracked() {
592 f(node.full_path(on_disk)?)
592 f(node.full_path(on_disk)?)
593 }
593 }
594 }
594 }
595 recur(on_disk, node.children, f)?
595 recur(on_disk, node.children, f)?
596 }
596 }
597 Ok(())
597 Ok(())
598 }
598 }
599 recur(on_disk, meta.root_nodes, &mut f)
599 recur(on_disk, meta.root_nodes, &mut f)
600 }
600 }
601
601
602 /// Returns new data and metadata, together with whether that data should be
602 /// Returns new data and metadata, together with whether that data should be
603 /// appended to the existing data file whose content is at
603 /// appended to the existing data file whose content is at
604 /// `dirstate_map.on_disk` (true), instead of written to a new data file
604 /// `dirstate_map.on_disk` (true), instead of written to a new data file
605 /// (false), and the previous size of data on disk.
605 /// (false), and the previous size of data on disk.
606 pub(super) fn write(
606 pub(super) fn write(
607 dirstate_map: &DirstateMap,
607 dirstate_map: &DirstateMap,
608 can_append: bool,
608 can_append: bool,
609 ) -> Result<(Vec<u8>, TreeMetadata, bool, usize), DirstateError> {
609 ) -> Result<(Vec<u8>, TreeMetadata, bool, usize), DirstateError> {
610 let append = can_append && dirstate_map.write_should_append();
610 let append = can_append && dirstate_map.write_should_append();
611
611
612 // This ignores the space for paths, and for nodes without an entry.
612 // This ignores the space for paths, and for nodes without an entry.
613 // TODO: better estimate? Skip the `Vec` and write to a file directly?
613 // TODO: better estimate? Skip the `Vec` and write to a file directly?
614 let size_guess = std::mem::size_of::<Node>()
614 let size_guess = std::mem::size_of::<Node>()
615 * dirstate_map.nodes_with_entry_count as usize;
615 * dirstate_map.nodes_with_entry_count as usize;
616
616
617 let mut writer = Writer {
617 let mut writer = Writer {
618 dirstate_map,
618 dirstate_map,
619 append,
619 append,
620 out: Vec::with_capacity(size_guess),
620 out: Vec::with_capacity(size_guess),
621 };
621 };
622
622
623 let root_nodes = writer.write_nodes(dirstate_map.root.as_ref())?;
623 let root_nodes = writer.write_nodes(dirstate_map.root.as_ref())?;
624
624
625 let unreachable_bytes = if append {
626 dirstate_map.unreachable_bytes
627 } else {
628 0
629 };
625 let meta = TreeMetadata {
630 let meta = TreeMetadata {
626 root_nodes,
631 root_nodes,
627 nodes_with_entry_count: dirstate_map.nodes_with_entry_count.into(),
632 nodes_with_entry_count: dirstate_map.nodes_with_entry_count.into(),
628 nodes_with_copy_source_count: dirstate_map
633 nodes_with_copy_source_count: dirstate_map
629 .nodes_with_copy_source_count
634 .nodes_with_copy_source_count
630 .into(),
635 .into(),
631 unreachable_bytes: dirstate_map.unreachable_bytes.into(),
636 unreachable_bytes: unreachable_bytes.into(),
632 unused: [0; 4],
637 unused: [0; 4],
633 ignore_patterns_hash: dirstate_map.ignore_patterns_hash,
638 ignore_patterns_hash: dirstate_map.ignore_patterns_hash,
634 };
639 };
635 Ok((writer.out, meta, append, dirstate_map.old_data_size))
640 Ok((writer.out, meta, append, dirstate_map.old_data_size))
636 }
641 }
637
642
638 struct Writer<'dmap, 'on_disk> {
643 struct Writer<'dmap, 'on_disk> {
639 dirstate_map: &'dmap DirstateMap<'on_disk>,
644 dirstate_map: &'dmap DirstateMap<'on_disk>,
640 append: bool,
645 append: bool,
641 out: Vec<u8>,
646 out: Vec<u8>,
642 }
647 }
643
648
644 impl Writer<'_, '_> {
649 impl Writer<'_, '_> {
645 fn write_nodes(
650 fn write_nodes(
646 &mut self,
651 &mut self,
647 nodes: dirstate_map::ChildNodesRef,
652 nodes: dirstate_map::ChildNodesRef,
648 ) -> Result<ChildNodes, DirstateError> {
653 ) -> Result<ChildNodes, DirstateError> {
649 // Reuse already-written nodes if possible
654 // Reuse already-written nodes if possible
650 if self.append {
655 if self.append {
651 if let dirstate_map::ChildNodesRef::OnDisk(nodes_slice) = nodes {
656 if let dirstate_map::ChildNodesRef::OnDisk(nodes_slice) = nodes {
652 let start = self.on_disk_offset_of(nodes_slice).expect(
657 let start = self.on_disk_offset_of(nodes_slice).expect(
653 "dirstate-v2 OnDisk nodes not found within on_disk",
658 "dirstate-v2 OnDisk nodes not found within on_disk",
654 );
659 );
655 let len = child_nodes_len_from_usize(nodes_slice.len());
660 let len = child_nodes_len_from_usize(nodes_slice.len());
656 return Ok(ChildNodes { start, len });
661 return Ok(ChildNodes { start, len });
657 }
662 }
658 }
663 }
659
664
660 // `dirstate_map::ChildNodes::InMemory` contains a `HashMap` which has
665 // `dirstate_map::ChildNodes::InMemory` contains a `HashMap` which has
661 // undefined iteration order. Sort to enable binary search in the
666 // undefined iteration order. Sort to enable binary search in the
662 // written file.
667 // written file.
663 let nodes = nodes.sorted();
668 let nodes = nodes.sorted();
664 let nodes_len = nodes.len();
669 let nodes_len = nodes.len();
665
670
666 // First accumulate serialized nodes in a `Vec`
671 // First accumulate serialized nodes in a `Vec`
667 let mut on_disk_nodes = Vec::with_capacity(nodes_len);
672 let mut on_disk_nodes = Vec::with_capacity(nodes_len);
668 for node in nodes {
673 for node in nodes {
669 let children =
674 let children =
670 self.write_nodes(node.children(self.dirstate_map.on_disk)?)?;
675 self.write_nodes(node.children(self.dirstate_map.on_disk)?)?;
671 let full_path = node.full_path(self.dirstate_map.on_disk)?;
676 let full_path = node.full_path(self.dirstate_map.on_disk)?;
672 let full_path = self.write_path(full_path.as_bytes());
677 let full_path = self.write_path(full_path.as_bytes());
673 let copy_source = if let Some(source) =
678 let copy_source = if let Some(source) =
674 node.copy_source(self.dirstate_map.on_disk)?
679 node.copy_source(self.dirstate_map.on_disk)?
675 {
680 {
676 self.write_path(source.as_bytes())
681 self.write_path(source.as_bytes())
677 } else {
682 } else {
678 PathSlice {
683 PathSlice {
679 start: 0.into(),
684 start: 0.into(),
680 len: 0.into(),
685 len: 0.into(),
681 }
686 }
682 };
687 };
683 on_disk_nodes.push(match node {
688 on_disk_nodes.push(match node {
684 NodeRef::InMemory(path, node) => {
689 NodeRef::InMemory(path, node) => {
685 let (flags, size, mtime) = match &node.data {
690 let (flags, size, mtime) = match &node.data {
686 dirstate_map::NodeData::Entry(entry) => {
691 dirstate_map::NodeData::Entry(entry) => {
687 Node::from_dirstate_entry(entry)
692 Node::from_dirstate_entry(entry)
688 }
693 }
689 dirstate_map::NodeData::CachedDirectory { mtime } => {
694 dirstate_map::NodeData::CachedDirectory { mtime } => {
690 // we currently never set an mtime if unknown files
695 // we currently never set an mtime if unknown files
691 // are present.
696 // are present.
692 // So if we have an mtime for a directory, we know
697 // So if we have an mtime for a directory, we know
693 // there are no unknown
698 // there are no unknown
694 // files and we
699 // files and we
695 // blindly set ALL_UNKNOWN_RECORDED.
700 // blindly set ALL_UNKNOWN_RECORDED.
696 //
701 //
697 // We never set ALL_IGNORED_RECORDED since we
702 // We never set ALL_IGNORED_RECORDED since we
698 // don't track that case
703 // don't track that case
699 // currently.
704 // currently.
700 let mut flags = Flags::DIRECTORY
705 let mut flags = Flags::DIRECTORY
701 | Flags::HAS_MTIME
706 | Flags::HAS_MTIME
702 | Flags::ALL_UNKNOWN_RECORDED;
707 | Flags::ALL_UNKNOWN_RECORDED;
703 if mtime.second_ambiguous {
708 if mtime.second_ambiguous {
704 flags.insert(Flags::MTIME_SECOND_AMBIGUOUS)
709 flags.insert(Flags::MTIME_SECOND_AMBIGUOUS)
705 }
710 }
706 (flags, 0.into(), (*mtime).into())
711 (flags, 0.into(), (*mtime).into())
707 }
712 }
708 dirstate_map::NodeData::None => (
713 dirstate_map::NodeData::None => (
709 Flags::DIRECTORY,
714 Flags::DIRECTORY,
710 0.into(),
715 0.into(),
711 PackedTruncatedTimestamp::null(),
716 PackedTruncatedTimestamp::null(),
712 ),
717 ),
713 };
718 };
714 Node {
719 Node {
715 children,
720 children,
716 copy_source,
721 copy_source,
717 full_path,
722 full_path,
718 base_name_start: u16::try_from(path.base_name_start())
723 base_name_start: u16::try_from(path.base_name_start())
719 // Could only panic for paths over 64 KiB
724 // Could only panic for paths over 64 KiB
720 .expect("dirstate-v2 path length overflow")
725 .expect("dirstate-v2 path length overflow")
721 .into(),
726 .into(),
722 descendants_with_entry_count: node
727 descendants_with_entry_count: node
723 .descendants_with_entry_count
728 .descendants_with_entry_count
724 .into(),
729 .into(),
725 tracked_descendants_count: node
730 tracked_descendants_count: node
726 .tracked_descendants_count
731 .tracked_descendants_count
727 .into(),
732 .into(),
728 flags: flags.bits().into(),
733 flags: flags.bits().into(),
729 size,
734 size,
730 mtime,
735 mtime,
731 }
736 }
732 }
737 }
733 NodeRef::OnDisk(node) => Node {
738 NodeRef::OnDisk(node) => Node {
734 children,
739 children,
735 copy_source,
740 copy_source,
736 full_path,
741 full_path,
737 ..*node
742 ..*node
738 },
743 },
739 })
744 })
740 }
745 }
741 // … so we can write them contiguously, after writing everything else
746 // … so we can write them contiguously, after writing everything else
742 // they refer to.
747 // they refer to.
743 let start = self.current_offset();
748 let start = self.current_offset();
744 let len = child_nodes_len_from_usize(nodes_len);
749 let len = child_nodes_len_from_usize(nodes_len);
745 self.out.extend(on_disk_nodes.as_bytes());
750 self.out.extend(on_disk_nodes.as_bytes());
746 Ok(ChildNodes { start, len })
751 Ok(ChildNodes { start, len })
747 }
752 }
748
753
749 /// If the given slice of items is within `on_disk`, returns its offset
754 /// If the given slice of items is within `on_disk`, returns its offset
750 /// from the start of `on_disk`.
755 /// from the start of `on_disk`.
751 fn on_disk_offset_of<T>(&self, slice: &[T]) -> Option<Offset>
756 fn on_disk_offset_of<T>(&self, slice: &[T]) -> Option<Offset>
752 where
757 where
753 T: BytesCast,
758 T: BytesCast,
754 {
759 {
755 fn address_range(slice: &[u8]) -> std::ops::RangeInclusive<usize> {
760 fn address_range(slice: &[u8]) -> std::ops::RangeInclusive<usize> {
756 let start = slice.as_ptr() as usize;
761 let start = slice.as_ptr() as usize;
757 let end = start + slice.len();
762 let end = start + slice.len();
758 start..=end
763 start..=end
759 }
764 }
760 let slice_addresses = address_range(slice.as_bytes());
765 let slice_addresses = address_range(slice.as_bytes());
761 let on_disk_addresses = address_range(self.dirstate_map.on_disk);
766 let on_disk_addresses = address_range(self.dirstate_map.on_disk);
762 if on_disk_addresses.contains(slice_addresses.start())
767 if on_disk_addresses.contains(slice_addresses.start())
763 && on_disk_addresses.contains(slice_addresses.end())
768 && on_disk_addresses.contains(slice_addresses.end())
764 {
769 {
765 let offset = slice_addresses.start() - on_disk_addresses.start();
770 let offset = slice_addresses.start() - on_disk_addresses.start();
766 Some(offset_from_usize(offset))
771 Some(offset_from_usize(offset))
767 } else {
772 } else {
768 None
773 None
769 }
774 }
770 }
775 }
771
776
772 fn current_offset(&mut self) -> Offset {
777 fn current_offset(&mut self) -> Offset {
773 let mut offset = self.out.len();
778 let mut offset = self.out.len();
774 if self.append {
779 if self.append {
775 offset += self.dirstate_map.on_disk.len()
780 offset += self.dirstate_map.on_disk.len()
776 }
781 }
777 offset_from_usize(offset)
782 offset_from_usize(offset)
778 }
783 }
779
784
780 fn write_path(&mut self, slice: &[u8]) -> PathSlice {
785 fn write_path(&mut self, slice: &[u8]) -> PathSlice {
781 let len = path_len_from_usize(slice.len());
786 let len = path_len_from_usize(slice.len());
782 // Reuse an already-written path if possible
787 // Reuse an already-written path if possible
783 if self.append {
788 if self.append {
784 if let Some(start) = self.on_disk_offset_of(slice) {
789 if let Some(start) = self.on_disk_offset_of(slice) {
785 return PathSlice { start, len };
790 return PathSlice { start, len };
786 }
791 }
787 }
792 }
788 let start = self.current_offset();
793 let start = self.current_offset();
789 self.out.extend(slice.as_bytes());
794 self.out.extend(slice.as_bytes());
790 PathSlice { start, len }
795 PathSlice { start, len }
791 }
796 }
792 }
797 }
793
798
794 fn offset_from_usize(x: usize) -> Offset {
799 fn offset_from_usize(x: usize) -> Offset {
795 u32::try_from(x)
800 u32::try_from(x)
796 // Could only panic for a dirstate file larger than 4 GiB
801 // Could only panic for a dirstate file larger than 4 GiB
797 .expect("dirstate-v2 offset overflow")
802 .expect("dirstate-v2 offset overflow")
798 .into()
803 .into()
799 }
804 }
800
805
801 fn child_nodes_len_from_usize(x: usize) -> Size {
806 fn child_nodes_len_from_usize(x: usize) -> Size {
802 u32::try_from(x)
807 u32::try_from(x)
803 // Could only panic with over 4 billion nodes
808 // Could only panic with over 4 billion nodes
804 .expect("dirstate-v2 slice length overflow")
809 .expect("dirstate-v2 slice length overflow")
805 .into()
810 .into()
806 }
811 }
807
812
808 fn path_len_from_usize(x: usize) -> PathSize {
813 fn path_len_from_usize(x: usize) -> PathSize {
809 u16::try_from(x)
814 u16::try_from(x)
810 // Could only panic for paths over 64 KiB
815 // Could only panic for paths over 64 KiB
811 .expect("dirstate-v2 path length overflow")
816 .expect("dirstate-v2 path length overflow")
812 .into()
817 .into()
813 }
818 }
814
819
815 impl From<TruncatedTimestamp> for PackedTruncatedTimestamp {
820 impl From<TruncatedTimestamp> for PackedTruncatedTimestamp {
816 fn from(timestamp: TruncatedTimestamp) -> Self {
821 fn from(timestamp: TruncatedTimestamp) -> Self {
817 Self {
822 Self {
818 truncated_seconds: timestamp.truncated_seconds().into(),
823 truncated_seconds: timestamp.truncated_seconds().into(),
819 nanoseconds: timestamp.nanoseconds().into(),
824 nanoseconds: timestamp.nanoseconds().into(),
820 }
825 }
821 }
826 }
822 }
827 }
823
828
824 impl TryFrom<PackedTruncatedTimestamp> for TruncatedTimestamp {
829 impl TryFrom<PackedTruncatedTimestamp> for TruncatedTimestamp {
825 type Error = DirstateV2ParseError;
830 type Error = DirstateV2ParseError;
826
831
827 fn try_from(
832 fn try_from(
828 timestamp: PackedTruncatedTimestamp,
833 timestamp: PackedTruncatedTimestamp,
829 ) -> Result<Self, Self::Error> {
834 ) -> Result<Self, Self::Error> {
830 Self::from_already_truncated(
835 Self::from_already_truncated(
831 timestamp.truncated_seconds.get(),
836 timestamp.truncated_seconds.get(),
832 timestamp.nanoseconds.get(),
837 timestamp.nanoseconds.get(),
833 false,
838 false,
834 )
839 )
835 }
840 }
836 }
841 }
837 impl PackedTruncatedTimestamp {
842 impl PackedTruncatedTimestamp {
838 fn null() -> Self {
843 fn null() -> Self {
839 Self {
844 Self {
840 truncated_seconds: 0.into(),
845 truncated_seconds: 0.into(),
841 nanoseconds: 0.into(),
846 nanoseconds: 0.into(),
842 }
847 }
843 }
848 }
844 }
849 }
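
The `write` hunk above contains the actual fix for the unused bytes counter: the tree metadata now takes `unreachable_bytes` from the in-memory map only when appending to the existing data file; when a brand new data file is written, nothing in it can be unreachable yet, so the counter is reset to 0 instead of carrying over the stale value. A minimal sketch of that decision, using a hypothetical `DemoMap` stand-in rather than the real `DirstateMap`:

struct DemoMap {
    unreachable_bytes: u32,
}

// When appending, bytes written earlier that are no longer referenced stay
// in the data file, so the existing counter carries over. When the data
// file is rewritten from scratch, every byte in it is reachable again, so
// the counter restarts at 0.
fn unreachable_bytes_for_write(map: &DemoMap, append: bool) -> u32 {
    if append {
        map.unreachable_bytes
    } else {
        0
    }
}

fn main() {
    let map = DemoMap { unreachable_bytes: 120 };
    assert_eq!(unreachable_bytes_for_write(&map, true), 120);
    assert_eq!(unreachable_bytes_for_write(&map, false), 0);
}

The test change below checks exactly this: after a non-append rewrite creates a new docket and data file, `hg debugstate --docket` must report zero unused bytes for every implementation.
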
@@ -1,203 +1,202 b''
1 #testcases dirstate-v1 dirstate-v2
1 #testcases dirstate-v1 dirstate-v2
2
2
3 #if dirstate-v2
3 #if dirstate-v2
4 $ cat >> $HGRCPATH << EOF
4 $ cat >> $HGRCPATH << EOF
5 > [format]
5 > [format]
6 > use-dirstate-v2=1
6 > use-dirstate-v2=1
7 > [storage]
7 > [storage]
8 > dirstate-v2.slow-path=allow
8 > dirstate-v2.slow-path=allow
9 > EOF
9 > EOF
10 #endif
10 #endif
11
11
12 ------ Test dirstate._dirs refcounting
12 ------ Test dirstate._dirs refcounting
13
13
14 $ hg init t
14 $ hg init t
15 $ cd t
15 $ cd t
16 $ mkdir -p a/b/c/d
16 $ mkdir -p a/b/c/d
17 $ touch a/b/c/d/x
17 $ touch a/b/c/d/x
18 $ touch a/b/c/d/y
18 $ touch a/b/c/d/y
19 $ touch a/b/c/d/z
19 $ touch a/b/c/d/z
20 $ hg ci -Am m
20 $ hg ci -Am m
21 adding a/b/c/d/x
21 adding a/b/c/d/x
22 adding a/b/c/d/y
22 adding a/b/c/d/y
23 adding a/b/c/d/z
23 adding a/b/c/d/z
24 $ hg mv a z
24 $ hg mv a z
25 moving a/b/c/d/x to z/b/c/d/x
25 moving a/b/c/d/x to z/b/c/d/x
26 moving a/b/c/d/y to z/b/c/d/y
26 moving a/b/c/d/y to z/b/c/d/y
27 moving a/b/c/d/z to z/b/c/d/z
27 moving a/b/c/d/z to z/b/c/d/z
28
28
29 Test name collisions
29 Test name collisions
30
30
31 $ rm z/b/c/d/x
31 $ rm z/b/c/d/x
32 $ mkdir z/b/c/d/x
32 $ mkdir z/b/c/d/x
33 $ touch z/b/c/d/x/y
33 $ touch z/b/c/d/x/y
34 $ hg add z/b/c/d/x/y
34 $ hg add z/b/c/d/x/y
35 abort: file 'z/b/c/d/x' in dirstate clashes with 'z/b/c/d/x/y'
35 abort: file 'z/b/c/d/x' in dirstate clashes with 'z/b/c/d/x/y'
36 [255]
36 [255]
37 $ rm -rf z/b/c/d
37 $ rm -rf z/b/c/d
38 $ touch z/b/c/d
38 $ touch z/b/c/d
39 $ hg add z/b/c/d
39 $ hg add z/b/c/d
40 abort: directory 'z/b/c/d' already in dirstate
40 abort: directory 'z/b/c/d' already in dirstate
41 [255]
41 [255]
42
42
43 $ cd ..
43 $ cd ..
44
44
45 Issue1790: dirstate entry locked into unset if file mtime is set into
45 Issue1790: dirstate entry locked into unset if file mtime is set into
46 the future
46 the future
47
47
48 Prepare test repo:
48 Prepare test repo:
49
49
50 $ hg init u
50 $ hg init u
51 $ cd u
51 $ cd u
52 $ echo a > a
52 $ echo a > a
53 $ hg add
53 $ hg add
54 adding a
54 adding a
55 $ hg ci -m1
55 $ hg ci -m1
56
56
57 Set mtime of a into the future:
57 Set mtime of a into the future:
58
58
59 $ touch -t 203101011200 a
59 $ touch -t 203101011200 a
60
60
61 Status must not set a's entry to unset (issue1790):
61 Status must not set a's entry to unset (issue1790):
62
62
63 $ hg status
63 $ hg status
64 $ hg debugstate
64 $ hg debugstate
65 n 644 2 2031-01-01 12:00:00 a
65 n 644 2 2031-01-01 12:00:00 a
66
66
67 Test modulo storage/comparison of absurd dates:
67 Test modulo storage/comparison of absurd dates:
68
68
69 #if no-aix
69 #if no-aix
70 $ touch -t 195001011200 a
70 $ touch -t 195001011200 a
71 $ hg st
71 $ hg st
72 $ hg debugstate
72 $ hg debugstate
73 n 644 2 2018-01-19 15:14:08 a
73 n 644 2 2018-01-19 15:14:08 a
74 #endif
74 #endif
75
75
76 Verify that exceptions during a dirstate change leave the dirstate
76 Verify that exceptions during a dirstate change leave the dirstate
77 coherent (issue4353)
77 coherent (issue4353)
78
78
79 $ cat > ../dirstateexception.py <<EOF
79 $ cat > ../dirstateexception.py <<EOF
80 > from __future__ import absolute_import
80 > from __future__ import absolute_import
81 > from mercurial import (
81 > from mercurial import (
82 > error,
82 > error,
83 > extensions,
83 > extensions,
84 > mergestate as mergestatemod,
84 > mergestate as mergestatemod,
85 > )
85 > )
86 >
86 >
87 > def wraprecordupdates(*args):
87 > def wraprecordupdates(*args):
88 > raise error.Abort(b"simulated error while recording dirstateupdates")
88 > raise error.Abort(b"simulated error while recording dirstateupdates")
89 >
89 >
90 > def reposetup(ui, repo):
90 > def reposetup(ui, repo):
91 > extensions.wrapfunction(mergestatemod, 'recordupdates',
91 > extensions.wrapfunction(mergestatemod, 'recordupdates',
92 > wraprecordupdates)
92 > wraprecordupdates)
93 > EOF
93 > EOF
94
94
95 $ hg rm a
95 $ hg rm a
96 $ hg commit -m 'rm a'
96 $ hg commit -m 'rm a'
97 $ echo "[extensions]" >> .hg/hgrc
97 $ echo "[extensions]" >> .hg/hgrc
98 $ echo "dirstateex=../dirstateexception.py" >> .hg/hgrc
98 $ echo "dirstateex=../dirstateexception.py" >> .hg/hgrc
99 $ hg up 0
99 $ hg up 0
100 abort: simulated error while recording dirstateupdates
100 abort: simulated error while recording dirstateupdates
101 [255]
101 [255]
102 $ hg log -r . -T '{rev}\n'
102 $ hg log -r . -T '{rev}\n'
103 1
103 1
104 $ hg status
104 $ hg status
105 ? a
105 ? a
106
106
107 #if dirstate-v2
107 #if dirstate-v2
108 Check that folders that are prefixes of others do not throw the packer into an
108 Check that folders that are prefixes of others do not throw the packer into an
109 infinite loop.
109 infinite loop.
110
110
111 $ cd ..
111 $ cd ..
112 $ hg init infinite-loop
112 $ hg init infinite-loop
113 $ cd infinite-loop
113 $ cd infinite-loop
114 $ mkdir hgext3rd hgext
114 $ mkdir hgext3rd hgext
115 $ touch hgext3rd/__init__.py hgext/zeroconf.py
115 $ touch hgext3rd/__init__.py hgext/zeroconf.py
116 $ hg commit -Aqm0
116 $ hg commit -Aqm0
117
117
118 $ hg st -c
118 $ hg st -c
119 C hgext/zeroconf.py
119 C hgext/zeroconf.py
120 C hgext3rd/__init__.py
120 C hgext3rd/__init__.py
121
121
122 $ cd ..
122 $ cd ..
123
123
124 Check that the old dirstate data file is removed correctly and the new one is
124 Check that the old dirstate data file is removed correctly and the new one is
125 valid.
125 valid.
126
126
127 $ dirstate_data_files () {
127 $ dirstate_data_files () {
128 > find .hg -maxdepth 1 -name "dirstate.*"
128 > find .hg -maxdepth 1 -name "dirstate.*"
129 > }
129 > }
130
130
131 $ find_dirstate_uuid () {
131 $ find_dirstate_uuid () {
132 > hg debugstate --docket | grep uuid | sed 's/.*uuid: \(.*\)/\1/'
132 > hg debugstate --docket | grep uuid | sed 's/.*uuid: \(.*\)/\1/'
133 > }
133 > }
134
134
135 $ dirstate_uuid_has_not_changed () {
135 $ dirstate_uuid_has_not_changed () {
136 > # Pure Python always rewrites the whole dirstate
136 > # Pure Python always rewrites the whole dirstate
137 > if [ $# -eq 1 ] || [ "$HGMODULEPOLICY" = *"rust"* ] || [ -n "$RHG_INSTALLED_AS_HG" ]; then
137 > if [ $# -eq 1 ] || [ "$HGMODULEPOLICY" = *"rust"* ] || [ -n "$RHG_INSTALLED_AS_HG" ]; then
138 > test $current_uid = $(find_dirstate_uuid)
138 > test $current_uid = $(find_dirstate_uuid)
139 > fi
139 > fi
140 > }
140 > }
141
141
142 $ cd ..
142 $ cd ..
143 $ hg init append-mostly
143 $ hg init append-mostly
144 $ cd append-mostly
144 $ cd append-mostly
145 $ mkdir dir dir2
145 $ mkdir dir dir2
146 $ touch dir/a dir/b dir/c dir/d dir/e dir2/f
146 $ touch dir/a dir/b dir/c dir/d dir/e dir2/f
147 $ hg commit -Aqm initial
147 $ hg commit -Aqm initial
148 $ hg st
148 $ hg st
149 $ dirstate_data_files | wc -l
149 $ dirstate_data_files | wc -l
150 *1 (re)
150 *1 (re)
151 $ current_uid=$(find_dirstate_uuid)
151 $ current_uid=$(find_dirstate_uuid)
152
152
153 Nothing changes here
153 Nothing changes here
154
154
155 $ hg st
155 $ hg st
156 $ dirstate_data_files | wc -l
156 $ dirstate_data_files | wc -l
157 *1 (re)
157 *1 (re)
158 $ dirstate_uuid_has_not_changed
158 $ dirstate_uuid_has_not_changed
159
159
160 Trigger an append with a small change
160 Trigger an append with a small change
161
161
162 $ echo "modified" > dir2/f
162 $ echo "modified" > dir2/f
163 $ hg st
163 $ hg st
164 M dir2/f
164 M dir2/f
165 $ dirstate_data_files | wc -l
165 $ dirstate_data_files | wc -l
166 *1 (re)
166 *1 (re)
167 $ dirstate_uuid_has_not_changed
167 $ dirstate_uuid_has_not_changed
168
168
169 Unused bytes counter is non-0 when appending
169 Unused bytes counter is non-0 when appending
170 $ touch file
170 $ touch file
171 $ hg add file
171 $ hg add file
172 $ current_uid=$(find_dirstate_uuid)
172 $ current_uid=$(find_dirstate_uuid)
173
173
174 Trigger a rust/rhg run which updates the unused bytes value
174 Trigger a rust/rhg run which updates the unused bytes value
175 $ hg st
175 $ hg st
176 M dir2/f
176 M dir2/f
177 A file
177 A file
178 $ dirstate_data_files | wc -l
178 $ dirstate_data_files | wc -l
179 *1 (re)
179 *1 (re)
180 $ dirstate_uuid_has_not_changed
180 $ dirstate_uuid_has_not_changed
181
181
182 $ hg debugstate --docket | grep unused
182 $ hg debugstate --docket | grep unused
183 number of unused bytes: 0 (no-rust no-rhg !)
183 number of unused bytes: 0 (no-rust no-rhg !)
184 number of unused bytes: [1-9]\d* (re) (rhg no-rust !)
184 number of unused bytes: [1-9]\d* (re) (rhg no-rust !)
185 number of unused bytes: [1-9]\d* (re) (rust no-rhg !)
185 number of unused bytes: [1-9]\d* (re) (rust no-rhg !)
186 number of unused bytes: [1-9]\d* (re) (rust rhg !)
186 number of unused bytes: [1-9]\d* (re) (rust rhg !)
187
187
188 Delete most of the dirstate to trigger a non-append
188 Delete most of the dirstate to trigger a non-append
189 $ hg rm dir/a dir/b dir/c dir/d
189 $ hg rm dir/a dir/b dir/c dir/d
190 $ dirstate_data_files | wc -l
190 $ dirstate_data_files | wc -l
191 *1 (re)
191 *1 (re)
192 $ dirstate_uuid_has_not_changed also-if-python
192 $ dirstate_uuid_has_not_changed also-if-python
193 [1]
193 [1]
194
194
195 Check that unused bytes counter is reset when creating a new docket
195 Check that unused bytes counter is reset when creating a new docket
196
196
197 $ hg debugstate --docket | grep unused
197 $ hg debugstate --docket | grep unused
198 number of unused bytes: 0 (no-rust !)
198 number of unused bytes: 0
199 number of unused bytes: [1-9]\d* (re) (rust known-bad-output !)
200
199
201 #endif
200 #endif
202
201
203 $ cd ..
202 $ cd ..