rhg-files: reuse centralized dirstate logic...
Raphaël Gomès
r50875:95ffa065 default
@@ -1,875 +1,849 b''
1 //! The "version 2" disk representation of the dirstate
1 //! The "version 2" disk representation of the dirstate
2 //!
2 //!
3 //! See `mercurial/helptext/internals/dirstate-v2.txt`
3 //! See `mercurial/helptext/internals/dirstate-v2.txt`
4
4
5 use crate::dirstate::{DirstateV2Data, TruncatedTimestamp};
5 use crate::dirstate::{DirstateV2Data, TruncatedTimestamp};
6 use crate::dirstate_tree::dirstate_map::DirstateVersion;
6 use crate::dirstate_tree::dirstate_map::DirstateVersion;
7 use crate::dirstate_tree::dirstate_map::{self, DirstateMap, NodeRef};
7 use crate::dirstate_tree::dirstate_map::{self, DirstateMap, NodeRef};
8 use crate::dirstate_tree::path_with_basename::WithBasename;
8 use crate::dirstate_tree::path_with_basename::WithBasename;
9 use crate::errors::HgError;
9 use crate::errors::HgError;
10 use crate::utils::hg_path::HgPath;
10 use crate::utils::hg_path::HgPath;
11 use crate::DirstateEntry;
11 use crate::DirstateEntry;
12 use crate::DirstateError;
12 use crate::DirstateError;
13 use crate::DirstateParents;
13 use crate::DirstateParents;
14 use bitflags::bitflags;
14 use bitflags::bitflags;
15 use bytes_cast::unaligned::{U16Be, U32Be};
15 use bytes_cast::unaligned::{U16Be, U32Be};
16 use bytes_cast::BytesCast;
16 use bytes_cast::BytesCast;
17 use format_bytes::format_bytes;
17 use format_bytes::format_bytes;
18 use rand::Rng;
18 use rand::Rng;
19 use std::borrow::Cow;
19 use std::borrow::Cow;
20 use std::fmt::Write;
20 use std::fmt::Write;
21
21
22 /// Added at the start of `.hg/dirstate` when the "v2" format is used.
22 /// Added at the start of `.hg/dirstate` when the "v2" format is used.
23 /// This is a redundant sanity check more than an actual "magic number" since
23 /// This is a redundant sanity check more than an actual "magic number" since
24 /// `.hg/requires` already governs which format should be used.
24 /// `.hg/requires` already governs which format should be used.
25 pub const V2_FORMAT_MARKER: &[u8; 12] = b"dirstate-v2\n";
25 pub const V2_FORMAT_MARKER: &[u8; 12] = b"dirstate-v2\n";
26
26
27 /// Keep space for 256-bit hashes
27 /// Keep space for 256-bit hashes
28 const STORED_NODE_ID_BYTES: usize = 32;
28 const STORED_NODE_ID_BYTES: usize = 32;
29
29
30 /// … even though only 160 bits are used for now, with SHA-1
30 /// … even though only 160 bits are used for now, with SHA-1
31 const USED_NODE_ID_BYTES: usize = 20;
31 const USED_NODE_ID_BYTES: usize = 20;
32
32
33 pub(super) const IGNORE_PATTERNS_HASH_LEN: usize = 20;
33 pub(super) const IGNORE_PATTERNS_HASH_LEN: usize = 20;
34 pub(super) type IgnorePatternsHash = [u8; IGNORE_PATTERNS_HASH_LEN];
34 pub(super) type IgnorePatternsHash = [u8; IGNORE_PATTERNS_HASH_LEN];
35
35
36 /// Must match constants of the same names in `mercurial/dirstateutils/v2.py`
36 /// Must match constants of the same names in `mercurial/dirstateutils/v2.py`
37 const TREE_METADATA_SIZE: usize = 44;
37 const TREE_METADATA_SIZE: usize = 44;
38 const NODE_SIZE: usize = 44;
38 const NODE_SIZE: usize = 44;
39
39
40 /// Make sure that size-affecting changes are made knowingly
40 /// Make sure that size-affecting changes are made knowingly
41 #[allow(unused)]
41 #[allow(unused)]
42 fn static_assert_size_of() {
42 fn static_assert_size_of() {
43 let _ = std::mem::transmute::<TreeMetadata, [u8; TREE_METADATA_SIZE]>;
43 let _ = std::mem::transmute::<TreeMetadata, [u8; TREE_METADATA_SIZE]>;
44 let _ = std::mem::transmute::<DocketHeader, [u8; TREE_METADATA_SIZE + 81]>;
44 let _ = std::mem::transmute::<DocketHeader, [u8; TREE_METADATA_SIZE + 81]>;
45 let _ = std::mem::transmute::<Node, [u8; NODE_SIZE]>;
45 let _ = std::mem::transmute::<Node, [u8; NODE_SIZE]>;
46 }
46 }
47
47
48 // Must match `HEADER` in `mercurial/dirstateutils/docket.py`
48 // Must match `HEADER` in `mercurial/dirstateutils/docket.py`
49 #[derive(BytesCast)]
49 #[derive(BytesCast)]
50 #[repr(C)]
50 #[repr(C)]
51 struct DocketHeader {
51 struct DocketHeader {
52 marker: [u8; V2_FORMAT_MARKER.len()],
52 marker: [u8; V2_FORMAT_MARKER.len()],
53 parent_1: [u8; STORED_NODE_ID_BYTES],
53 parent_1: [u8; STORED_NODE_ID_BYTES],
54 parent_2: [u8; STORED_NODE_ID_BYTES],
54 parent_2: [u8; STORED_NODE_ID_BYTES],
55
55
56 metadata: TreeMetadata,
56 metadata: TreeMetadata,
57
57
58 /// Counted in bytes
58 /// Counted in bytes
59 data_size: Size,
59 data_size: Size,
60
60
61 uuid_size: u8,
61 uuid_size: u8,
62 }
62 }
63
63
64 pub struct Docket<'on_disk> {
64 pub struct Docket<'on_disk> {
65 header: &'on_disk DocketHeader,
65 header: &'on_disk DocketHeader,
66 pub uuid: &'on_disk [u8],
66 pub uuid: &'on_disk [u8],
67 }
67 }
68
68
69 /// Fields are documented in the *Tree metadata in the docket file*
69 /// Fields are documented in the *Tree metadata in the docket file*
70 /// section of `mercurial/helptext/internals/dirstate-v2.txt`
70 /// section of `mercurial/helptext/internals/dirstate-v2.txt`
71 #[derive(BytesCast)]
71 #[derive(BytesCast)]
72 #[repr(C)]
72 #[repr(C)]
73 pub struct TreeMetadata {
73 pub struct TreeMetadata {
74 root_nodes: ChildNodes,
74 root_nodes: ChildNodes,
75 nodes_with_entry_count: Size,
75 nodes_with_entry_count: Size,
76 nodes_with_copy_source_count: Size,
76 nodes_with_copy_source_count: Size,
77 unreachable_bytes: Size,
77 unreachable_bytes: Size,
78 unused: [u8; 4],
78 unused: [u8; 4],
79
79
80 /// See *Optional hash of ignore patterns* section of
80 /// See *Optional hash of ignore patterns* section of
81 /// `mercurial/helptext/internals/dirstate-v2.txt`
81 /// `mercurial/helptext/internals/dirstate-v2.txt`
82 ignore_patterns_hash: IgnorePatternsHash,
82 ignore_patterns_hash: IgnorePatternsHash,
83 }
83 }
84
84
85 /// Fields are documented in the *The data file format*
85 /// Fields are documented in the *The data file format*
86 /// section of `mercurial/helptext/internals/dirstate-v2.txt`
86 /// section of `mercurial/helptext/internals/dirstate-v2.txt`
87 #[derive(BytesCast, Debug)]
87 #[derive(BytesCast, Debug)]
88 #[repr(C)]
88 #[repr(C)]
89 pub(super) struct Node {
89 pub(super) struct Node {
90 full_path: PathSlice,
90 full_path: PathSlice,
91
91
92 /// In bytes from `self.full_path.start`
92 /// In bytes from `self.full_path.start`
93 base_name_start: PathSize,
93 base_name_start: PathSize,
94
94
95 copy_source: OptPathSlice,
95 copy_source: OptPathSlice,
96 children: ChildNodes,
96 children: ChildNodes,
97 pub(super) descendants_with_entry_count: Size,
97 pub(super) descendants_with_entry_count: Size,
98 pub(super) tracked_descendants_count: Size,
98 pub(super) tracked_descendants_count: Size,
99 flags: U16Be,
99 flags: U16Be,
100 size: U32Be,
100 size: U32Be,
101 mtime: PackedTruncatedTimestamp,
101 mtime: PackedTruncatedTimestamp,
102 }
102 }
103
103
104 bitflags! {
104 bitflags! {
105 #[repr(C)]
105 #[repr(C)]
106 struct Flags: u16 {
106 struct Flags: u16 {
107 const WDIR_TRACKED = 1 << 0;
107 const WDIR_TRACKED = 1 << 0;
108 const P1_TRACKED = 1 << 1;
108 const P1_TRACKED = 1 << 1;
109 const P2_INFO = 1 << 2;
109 const P2_INFO = 1 << 2;
110 const MODE_EXEC_PERM = 1 << 3;
110 const MODE_EXEC_PERM = 1 << 3;
111 const MODE_IS_SYMLINK = 1 << 4;
111 const MODE_IS_SYMLINK = 1 << 4;
112 const HAS_FALLBACK_EXEC = 1 << 5;
112 const HAS_FALLBACK_EXEC = 1 << 5;
113 const FALLBACK_EXEC = 1 << 6;
113 const FALLBACK_EXEC = 1 << 6;
114 const HAS_FALLBACK_SYMLINK = 1 << 7;
114 const HAS_FALLBACK_SYMLINK = 1 << 7;
115 const FALLBACK_SYMLINK = 1 << 8;
115 const FALLBACK_SYMLINK = 1 << 8;
116 const EXPECTED_STATE_IS_MODIFIED = 1 << 9;
116 const EXPECTED_STATE_IS_MODIFIED = 1 << 9;
117 const HAS_MODE_AND_SIZE = 1 << 10;
117 const HAS_MODE_AND_SIZE = 1 << 10;
118 const HAS_MTIME = 1 << 11;
118 const HAS_MTIME = 1 << 11;
119 const MTIME_SECOND_AMBIGUOUS = 1 << 12;
119 const MTIME_SECOND_AMBIGUOUS = 1 << 12;
120 const DIRECTORY = 1 << 13;
120 const DIRECTORY = 1 << 13;
121 const ALL_UNKNOWN_RECORDED = 1 << 14;
121 const ALL_UNKNOWN_RECORDED = 1 << 14;
122 const ALL_IGNORED_RECORDED = 1 << 15;
122 const ALL_IGNORED_RECORDED = 1 << 15;
123 }
123 }
124 }
124 }
125
125
126 /// Duration since the Unix epoch
126 /// Duration since the Unix epoch
127 #[derive(BytesCast, Copy, Clone, Debug)]
127 #[derive(BytesCast, Copy, Clone, Debug)]
128 #[repr(C)]
128 #[repr(C)]
129 struct PackedTruncatedTimestamp {
129 struct PackedTruncatedTimestamp {
130 truncated_seconds: U32Be,
130 truncated_seconds: U32Be,
131 nanoseconds: U32Be,
131 nanoseconds: U32Be,
132 }
132 }
133
133
134 /// Counted in bytes from the start of the file
134 /// Counted in bytes from the start of the file
135 ///
135 ///
136 /// NOTE: not supporting `.hg/dirstate` files larger than 4 GiB.
136 /// NOTE: not supporting `.hg/dirstate` files larger than 4 GiB.
137 type Offset = U32Be;
137 type Offset = U32Be;
138
138
139 /// Counted in number of items
139 /// Counted in number of items
140 ///
140 ///
141 /// NOTE: we choose not to support counting more than 4 billion nodes anywhere.
141 /// NOTE: we choose not to support counting more than 4 billion nodes anywhere.
142 type Size = U32Be;
142 type Size = U32Be;
143
143
144 /// Counted in bytes
144 /// Counted in bytes
145 ///
145 ///
146 /// NOTE: we choose not to support file names/paths longer than 64 KiB.
146 /// NOTE: we choose not to support file names/paths longer than 64 KiB.
147 type PathSize = U16Be;
147 type PathSize = U16Be;
148
148
149 /// A contiguous sequence of `len` times `Node`, representing the child nodes
149 /// A contiguous sequence of `len` times `Node`, representing the child nodes
150 /// of either some other node or of the repository root.
150 /// of either some other node or of the repository root.
151 ///
151 ///
152 /// Always sorted by ascending `full_path`, to allow binary search.
152 /// Always sorted by ascending `full_path`, to allow binary search.
153 /// Since nodes with the same parent node also have the same parent path,
153 /// Since nodes with the same parent node also have the same parent path,
154 /// only the `base_name`s need to be compared during binary search.
154 /// only the `base_name`s need to be compared during binary search.
155 #[derive(BytesCast, Copy, Clone, Debug)]
155 #[derive(BytesCast, Copy, Clone, Debug)]
156 #[repr(C)]
156 #[repr(C)]
157 struct ChildNodes {
157 struct ChildNodes {
158 start: Offset,
158 start: Offset,
159 len: Size,
159 len: Size,
160 }
160 }
161
161
162 /// A `HgPath` of `len` bytes
162 /// A `HgPath` of `len` bytes
163 #[derive(BytesCast, Copy, Clone, Debug)]
163 #[derive(BytesCast, Copy, Clone, Debug)]
164 #[repr(C)]
164 #[repr(C)]
165 struct PathSlice {
165 struct PathSlice {
166 start: Offset,
166 start: Offset,
167 len: PathSize,
167 len: PathSize,
168 }
168 }
169
169
170 /// Either nothing if `start == 0`, or a `HgPath` of `len` bytes
170 /// Either nothing if `start == 0`, or a `HgPath` of `len` bytes
171 type OptPathSlice = PathSlice;
171 type OptPathSlice = PathSlice;
172
172
173 /// Unexpected file format found in `.hg/dirstate` with the "v2" format.
173 /// Unexpected file format found in `.hg/dirstate` with the "v2" format.
174 ///
174 ///
175 /// This should only happen if Mercurial is buggy or a repository is corrupted.
175 /// This should only happen if Mercurial is buggy or a repository is corrupted.
176 #[derive(Debug)]
176 #[derive(Debug)]
177 pub struct DirstateV2ParseError {
177 pub struct DirstateV2ParseError {
178 message: String,
178 message: String,
179 }
179 }
180
180
181 impl DirstateV2ParseError {
181 impl DirstateV2ParseError {
182 pub fn new<S: Into<String>>(message: S) -> Self {
182 pub fn new<S: Into<String>>(message: S) -> Self {
183 Self {
183 Self {
184 message: message.into(),
184 message: message.into(),
185 }
185 }
186 }
186 }
187 }
187 }
188
188
189 impl From<DirstateV2ParseError> for HgError {
189 impl From<DirstateV2ParseError> for HgError {
190 fn from(e: DirstateV2ParseError) -> Self {
190 fn from(e: DirstateV2ParseError) -> Self {
191 HgError::corrupted(format!("dirstate-v2 parse error: {}", e.message))
191 HgError::corrupted(format!("dirstate-v2 parse error: {}", e.message))
192 }
192 }
193 }
193 }
194
194
195 impl From<DirstateV2ParseError> for crate::DirstateError {
195 impl From<DirstateV2ParseError> for crate::DirstateError {
196 fn from(error: DirstateV2ParseError) -> Self {
196 fn from(error: DirstateV2ParseError) -> Self {
197 HgError::from(error).into()
197 HgError::from(error).into()
198 }
198 }
199 }
199 }
200
200
201 impl TreeMetadata {
201 impl TreeMetadata {
202 pub fn as_bytes(&self) -> &[u8] {
202 pub fn as_bytes(&self) -> &[u8] {
203 BytesCast::as_bytes(self)
203 BytesCast::as_bytes(self)
204 }
204 }
205 }
205 }
206
206
207 impl<'on_disk> Docket<'on_disk> {
207 impl<'on_disk> Docket<'on_disk> {
208 /// Generate the identifier for a new data file
208 /// Generate the identifier for a new data file
209 ///
209 ///
210 /// TODO: support the `HGTEST_UUIDFILE` environment variable.
210 /// TODO: support the `HGTEST_UUIDFILE` environment variable.
211 /// See `mercurial/revlogutils/docket.py`
211 /// See `mercurial/revlogutils/docket.py`
212 pub fn new_uid() -> String {
212 pub fn new_uid() -> String {
213 const ID_LENGTH: usize = 8;
213 const ID_LENGTH: usize = 8;
214 let mut id = String::with_capacity(ID_LENGTH);
214 let mut id = String::with_capacity(ID_LENGTH);
215 let mut rng = rand::thread_rng();
215 let mut rng = rand::thread_rng();
216 for _ in 0..ID_LENGTH {
216 for _ in 0..ID_LENGTH {
217 // One random hexadecimal digit.
217 // One random hexadecimal digit.
218 // `unwrap` never panics because `impl Write for String`
218 // `unwrap` never panics because `impl Write for String`
219 // never returns an error.
219 // never returns an error.
220 write!(&mut id, "{:x}", rng.gen_range(0..16)).unwrap();
220 write!(&mut id, "{:x}", rng.gen_range(0..16)).unwrap();
221 }
221 }
222 id
222 id
223 }
223 }
224
224
225 pub fn serialize(
225 pub fn serialize(
226 parents: DirstateParents,
226 parents: DirstateParents,
227 tree_metadata: TreeMetadata,
227 tree_metadata: TreeMetadata,
228 data_size: u64,
228 data_size: u64,
229 uuid: &[u8],
229 uuid: &[u8],
230 ) -> Result<Vec<u8>, std::num::TryFromIntError> {
230 ) -> Result<Vec<u8>, std::num::TryFromIntError> {
231 let header = DocketHeader {
231 let header = DocketHeader {
232 marker: *V2_FORMAT_MARKER,
232 marker: *V2_FORMAT_MARKER,
233 parent_1: parents.p1.pad_to_256_bits(),
233 parent_1: parents.p1.pad_to_256_bits(),
234 parent_2: parents.p2.pad_to_256_bits(),
234 parent_2: parents.p2.pad_to_256_bits(),
235 metadata: tree_metadata,
235 metadata: tree_metadata,
236 data_size: u32::try_from(data_size)?.into(),
236 data_size: u32::try_from(data_size)?.into(),
237 uuid_size: uuid.len().try_into()?,
237 uuid_size: uuid.len().try_into()?,
238 };
238 };
239 let header = header.as_bytes();
239 let header = header.as_bytes();
240 let mut docket = Vec::with_capacity(header.len() + uuid.len());
240 let mut docket = Vec::with_capacity(header.len() + uuid.len());
241 docket.extend_from_slice(header);
241 docket.extend_from_slice(header);
242 docket.extend_from_slice(uuid);
242 docket.extend_from_slice(uuid);
243 Ok(docket)
243 Ok(docket)
244 }
244 }
245
245
246 pub fn parents(&self) -> DirstateParents {
246 pub fn parents(&self) -> DirstateParents {
247 use crate::Node;
247 use crate::Node;
248 let p1 = Node::try_from(&self.header.parent_1[..USED_NODE_ID_BYTES])
248 let p1 = Node::try_from(&self.header.parent_1[..USED_NODE_ID_BYTES])
249 .unwrap();
249 .unwrap();
250 let p2 = Node::try_from(&self.header.parent_2[..USED_NODE_ID_BYTES])
250 let p2 = Node::try_from(&self.header.parent_2[..USED_NODE_ID_BYTES])
251 .unwrap();
251 .unwrap();
252 DirstateParents { p1, p2 }
252 DirstateParents { p1, p2 }
253 }
253 }
254
254
255 pub fn tree_metadata(&self) -> &[u8] {
255 pub fn tree_metadata(&self) -> &[u8] {
256 self.header.metadata.as_bytes()
256 self.header.metadata.as_bytes()
257 }
257 }
258
258
259 pub fn data_size(&self) -> usize {
259 pub fn data_size(&self) -> usize {
260 // This `unwrap` could only panic on a 16-bit CPU
260 // This `unwrap` could only panic on a 16-bit CPU
261 self.header.data_size.get().try_into().unwrap()
261 self.header.data_size.get().try_into().unwrap()
262 }
262 }
263
263
264 pub fn data_filename(&self) -> String {
264 pub fn data_filename(&self) -> String {
265 String::from_utf8(format_bytes!(b"dirstate.{}", self.uuid)).unwrap()
265 String::from_utf8(format_bytes!(b"dirstate.{}", self.uuid)).unwrap()
266 }
266 }
267 }
267 }
268
268
269 pub fn read_docket(
269 pub fn read_docket(
270 on_disk: &[u8],
270 on_disk: &[u8],
271 ) -> Result<Docket<'_>, DirstateV2ParseError> {
271 ) -> Result<Docket<'_>, DirstateV2ParseError> {
272 let (header, uuid) = DocketHeader::from_bytes(on_disk).map_err(|e| {
272 let (header, uuid) = DocketHeader::from_bytes(on_disk).map_err(|e| {
273 DirstateV2ParseError::new(format!("when reading docket, {}", e))
273 DirstateV2ParseError::new(format!("when reading docket, {}", e))
274 })?;
274 })?;
275 let uuid_size = header.uuid_size as usize;
275 let uuid_size = header.uuid_size as usize;
276 if header.marker == *V2_FORMAT_MARKER && uuid.len() == uuid_size {
276 if header.marker == *V2_FORMAT_MARKER && uuid.len() == uuid_size {
277 Ok(Docket { header, uuid })
277 Ok(Docket { header, uuid })
278 } else {
278 } else {
279 Err(DirstateV2ParseError::new(
279 Err(DirstateV2ParseError::new(
280 "invalid format marker or uuid size",
280 "invalid format marker or uuid size",
281 ))
281 ))
282 }
282 }
283 }
283 }
284
284
285 pub(super) fn read<'on_disk>(
285 pub(super) fn read<'on_disk>(
286 on_disk: &'on_disk [u8],
286 on_disk: &'on_disk [u8],
287 metadata: &[u8],
287 metadata: &[u8],
288 ) -> Result<DirstateMap<'on_disk>, DirstateV2ParseError> {
288 ) -> Result<DirstateMap<'on_disk>, DirstateV2ParseError> {
289 if on_disk.is_empty() {
289 if on_disk.is_empty() {
290 let mut map = DirstateMap::empty(on_disk);
290 let mut map = DirstateMap::empty(on_disk);
291 map.dirstate_version = DirstateVersion::V2;
291 map.dirstate_version = DirstateVersion::V2;
292 return Ok(map);
292 return Ok(map);
293 }
293 }
294 let (meta, _) = TreeMetadata::from_bytes(metadata).map_err(|e| {
294 let (meta, _) = TreeMetadata::from_bytes(metadata).map_err(|e| {
295 DirstateV2ParseError::new(format!("when parsing tree metadata, {}", e))
295 DirstateV2ParseError::new(format!("when parsing tree metadata, {}", e))
296 })?;
296 })?;
297 let dirstate_map = DirstateMap {
297 let dirstate_map = DirstateMap {
298 on_disk,
298 on_disk,
299 root: dirstate_map::ChildNodes::OnDisk(
299 root: dirstate_map::ChildNodes::OnDisk(
300 read_nodes(on_disk, meta.root_nodes).map_err(|mut e| {
300 read_nodes(on_disk, meta.root_nodes).map_err(|mut e| {
301 e.message = format!("{}, when reading root nodes", e.message);
301 e.message = format!("{}, when reading root nodes", e.message);
302 e
302 e
303 })?,
303 })?,
304 ),
304 ),
305 nodes_with_entry_count: meta.nodes_with_entry_count.get(),
305 nodes_with_entry_count: meta.nodes_with_entry_count.get(),
306 nodes_with_copy_source_count: meta.nodes_with_copy_source_count.get(),
306 nodes_with_copy_source_count: meta.nodes_with_copy_source_count.get(),
307 ignore_patterns_hash: meta.ignore_patterns_hash,
307 ignore_patterns_hash: meta.ignore_patterns_hash,
308 unreachable_bytes: meta.unreachable_bytes.get(),
308 unreachable_bytes: meta.unreachable_bytes.get(),
309 old_data_size: on_disk.len(),
309 old_data_size: on_disk.len(),
310 dirstate_version: DirstateVersion::V2,
310 dirstate_version: DirstateVersion::V2,
311 };
311 };
312 Ok(dirstate_map)
312 Ok(dirstate_map)
313 }
313 }
314
314
315 impl Node {
315 impl Node {
316 pub(super) fn full_path<'on_disk>(
316 pub(super) fn full_path<'on_disk>(
317 &self,
317 &self,
318 on_disk: &'on_disk [u8],
318 on_disk: &'on_disk [u8],
319 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
319 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
320 read_hg_path(on_disk, self.full_path)
320 read_hg_path(on_disk, self.full_path)
321 }
321 }
322
322
323 pub(super) fn base_name_start(
323 pub(super) fn base_name_start(
324 &self,
324 &self,
325 ) -> Result<usize, DirstateV2ParseError> {
325 ) -> Result<usize, DirstateV2ParseError> {
326 let start = self.base_name_start.get();
326 let start = self.base_name_start.get();
327 if start < self.full_path.len.get() {
327 if start < self.full_path.len.get() {
328 let start = usize::try_from(start)
328 let start = usize::try_from(start)
329 // u32 -> usize, could only panic on a 16-bit CPU
329 // u32 -> usize, could only panic on a 16-bit CPU
330 .expect("dirstate-v2 base_name_start out of bounds");
330 .expect("dirstate-v2 base_name_start out of bounds");
331 Ok(start)
331 Ok(start)
332 } else {
332 } else {
333 Err(DirstateV2ParseError::new("not enough bytes for base name"))
333 Err(DirstateV2ParseError::new("not enough bytes for base name"))
334 }
334 }
335 }
335 }
336
336
337 pub(super) fn base_name<'on_disk>(
337 pub(super) fn base_name<'on_disk>(
338 &self,
338 &self,
339 on_disk: &'on_disk [u8],
339 on_disk: &'on_disk [u8],
340 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
340 ) -> Result<&'on_disk HgPath, DirstateV2ParseError> {
341 let full_path = self.full_path(on_disk)?;
341 let full_path = self.full_path(on_disk)?;
342 let base_name_start = self.base_name_start()?;
342 let base_name_start = self.base_name_start()?;
343 Ok(HgPath::new(&full_path.as_bytes()[base_name_start..]))
343 Ok(HgPath::new(&full_path.as_bytes()[base_name_start..]))
344 }
344 }
345
345
346 pub(super) fn path<'on_disk>(
346 pub(super) fn path<'on_disk>(
347 &self,
347 &self,
348 on_disk: &'on_disk [u8],
348 on_disk: &'on_disk [u8],
349 ) -> Result<dirstate_map::NodeKey<'on_disk>, DirstateV2ParseError> {
349 ) -> Result<dirstate_map::NodeKey<'on_disk>, DirstateV2ParseError> {
350 Ok(WithBasename::from_raw_parts(
350 Ok(WithBasename::from_raw_parts(
351 Cow::Borrowed(self.full_path(on_disk)?),
351 Cow::Borrowed(self.full_path(on_disk)?),
352 self.base_name_start()?,
352 self.base_name_start()?,
353 ))
353 ))
354 }
354 }
355
355
356 pub(super) fn has_copy_source(&self) -> bool {
356 pub(super) fn has_copy_source(&self) -> bool {
357 self.copy_source.start.get() != 0
357 self.copy_source.start.get() != 0
358 }
358 }
359
359
360 pub(super) fn copy_source<'on_disk>(
360 pub(super) fn copy_source<'on_disk>(
361 &self,
361 &self,
362 on_disk: &'on_disk [u8],
362 on_disk: &'on_disk [u8],
363 ) -> Result<Option<&'on_disk HgPath>, DirstateV2ParseError> {
363 ) -> Result<Option<&'on_disk HgPath>, DirstateV2ParseError> {
364 Ok(if self.has_copy_source() {
364 Ok(if self.has_copy_source() {
365 Some(read_hg_path(on_disk, self.copy_source)?)
365 Some(read_hg_path(on_disk, self.copy_source)?)
366 } else {
366 } else {
367 None
367 None
368 })
368 })
369 }
369 }
370
370
371 fn flags(&self) -> Flags {
371 fn flags(&self) -> Flags {
372 Flags::from_bits_truncate(self.flags.get())
372 Flags::from_bits_truncate(self.flags.get())
373 }
373 }
374
374
375 fn has_entry(&self) -> bool {
375 fn has_entry(&self) -> bool {
376 self.flags().intersects(
376 self.flags().intersects(
377 Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
377 Flags::WDIR_TRACKED | Flags::P1_TRACKED | Flags::P2_INFO,
378 )
378 )
379 }
379 }
380
380
381 pub(super) fn node_data(
381 pub(super) fn node_data(
382 &self,
382 &self,
383 ) -> Result<dirstate_map::NodeData, DirstateV2ParseError> {
383 ) -> Result<dirstate_map::NodeData, DirstateV2ParseError> {
384 if self.has_entry() {
384 if self.has_entry() {
385 Ok(dirstate_map::NodeData::Entry(self.assume_entry()?))
385 Ok(dirstate_map::NodeData::Entry(self.assume_entry()?))
386 } else if let Some(mtime) = self.cached_directory_mtime()? {
386 } else if let Some(mtime) = self.cached_directory_mtime()? {
387 Ok(dirstate_map::NodeData::CachedDirectory { mtime })
387 Ok(dirstate_map::NodeData::CachedDirectory { mtime })
388 } else {
388 } else {
389 Ok(dirstate_map::NodeData::None)
389 Ok(dirstate_map::NodeData::None)
390 }
390 }
391 }
391 }
392
392
393 pub(super) fn cached_directory_mtime(
393 pub(super) fn cached_directory_mtime(
394 &self,
394 &self,
395 ) -> Result<Option<TruncatedTimestamp>, DirstateV2ParseError> {
395 ) -> Result<Option<TruncatedTimestamp>, DirstateV2ParseError> {
396 // For now we do not have code to handle the absence of
396 // For now we do not have code to handle the absence of
397 // ALL_UNKNOWN_RECORDED, so we ignore the mtime if the flag is
397 // ALL_UNKNOWN_RECORDED, so we ignore the mtime if the flag is
398 // unset.
398 // unset.
399 if self.flags().contains(Flags::DIRECTORY)
399 if self.flags().contains(Flags::DIRECTORY)
400 && self.flags().contains(Flags::HAS_MTIME)
400 && self.flags().contains(Flags::HAS_MTIME)
401 && self.flags().contains(Flags::ALL_UNKNOWN_RECORDED)
401 && self.flags().contains(Flags::ALL_UNKNOWN_RECORDED)
402 {
402 {
403 Ok(Some(self.mtime()?))
403 Ok(Some(self.mtime()?))
404 } else {
404 } else {
405 Ok(None)
405 Ok(None)
406 }
406 }
407 }
407 }
408
408
409 fn synthesize_unix_mode(&self) -> u32 {
409 fn synthesize_unix_mode(&self) -> u32 {
410 let file_type = if self.flags().contains(Flags::MODE_IS_SYMLINK) {
410 let file_type = if self.flags().contains(Flags::MODE_IS_SYMLINK) {
411 libc::S_IFLNK
411 libc::S_IFLNK
412 } else {
412 } else {
413 libc::S_IFREG
413 libc::S_IFREG
414 };
414 };
415 let permissions = if self.flags().contains(Flags::MODE_EXEC_PERM) {
415 let permissions = if self.flags().contains(Flags::MODE_EXEC_PERM) {
416 0o755
416 0o755
417 } else {
417 } else {
418 0o644
418 0o644
419 };
419 };
420 file_type | permissions
420 file_type | permissions
421 }
421 }
422
422
423 fn mtime(&self) -> Result<TruncatedTimestamp, DirstateV2ParseError> {
423 fn mtime(&self) -> Result<TruncatedTimestamp, DirstateV2ParseError> {
424 let mut m: TruncatedTimestamp = self.mtime.try_into()?;
424 let mut m: TruncatedTimestamp = self.mtime.try_into()?;
425 if self.flags().contains(Flags::MTIME_SECOND_AMBIGUOUS) {
425 if self.flags().contains(Flags::MTIME_SECOND_AMBIGUOUS) {
426 m.second_ambiguous = true;
426 m.second_ambiguous = true;
427 }
427 }
428 Ok(m)
428 Ok(m)
429 }
429 }
430
430
431 fn assume_entry(&self) -> Result<DirstateEntry, DirstateV2ParseError> {
431 fn assume_entry(&self) -> Result<DirstateEntry, DirstateV2ParseError> {
432 // TODO: convert through raw bits instead?
432 // TODO: convert through raw bits instead?
433 let wc_tracked = self.flags().contains(Flags::WDIR_TRACKED);
433 let wc_tracked = self.flags().contains(Flags::WDIR_TRACKED);
434 let p1_tracked = self.flags().contains(Flags::P1_TRACKED);
434 let p1_tracked = self.flags().contains(Flags::P1_TRACKED);
435 let p2_info = self.flags().contains(Flags::P2_INFO);
435 let p2_info = self.flags().contains(Flags::P2_INFO);
436 let mode_size = if self.flags().contains(Flags::HAS_MODE_AND_SIZE)
436 let mode_size = if self.flags().contains(Flags::HAS_MODE_AND_SIZE)
437 && !self.flags().contains(Flags::EXPECTED_STATE_IS_MODIFIED)
437 && !self.flags().contains(Flags::EXPECTED_STATE_IS_MODIFIED)
438 {
438 {
439 Some((self.synthesize_unix_mode(), self.size.into()))
439 Some((self.synthesize_unix_mode(), self.size.into()))
440 } else {
440 } else {
441 None
441 None
442 };
442 };
443 let mtime = if self.flags().contains(Flags::HAS_MTIME)
443 let mtime = if self.flags().contains(Flags::HAS_MTIME)
444 && !self.flags().contains(Flags::DIRECTORY)
444 && !self.flags().contains(Flags::DIRECTORY)
445 && !self.flags().contains(Flags::EXPECTED_STATE_IS_MODIFIED)
445 && !self.flags().contains(Flags::EXPECTED_STATE_IS_MODIFIED)
446 {
446 {
447 Some(self.mtime()?)
447 Some(self.mtime()?)
448 } else {
448 } else {
449 None
449 None
450 };
450 };
451 let fallback_exec = if self.flags().contains(Flags::HAS_FALLBACK_EXEC)
451 let fallback_exec = if self.flags().contains(Flags::HAS_FALLBACK_EXEC)
452 {
452 {
453 Some(self.flags().contains(Flags::FALLBACK_EXEC))
453 Some(self.flags().contains(Flags::FALLBACK_EXEC))
454 } else {
454 } else {
455 None
455 None
456 };
456 };
457 let fallback_symlink =
457 let fallback_symlink =
458 if self.flags().contains(Flags::HAS_FALLBACK_SYMLINK) {
458 if self.flags().contains(Flags::HAS_FALLBACK_SYMLINK) {
459 Some(self.flags().contains(Flags::FALLBACK_SYMLINK))
459 Some(self.flags().contains(Flags::FALLBACK_SYMLINK))
460 } else {
460 } else {
461 None
461 None
462 };
462 };
463 Ok(DirstateEntry::from_v2_data(DirstateV2Data {
463 Ok(DirstateEntry::from_v2_data(DirstateV2Data {
464 wc_tracked,
464 wc_tracked,
465 p1_tracked,
465 p1_tracked,
466 p2_info,
466 p2_info,
467 mode_size,
467 mode_size,
468 mtime,
468 mtime,
469 fallback_exec,
469 fallback_exec,
470 fallback_symlink,
470 fallback_symlink,
471 }))
471 }))
472 }
472 }
473
473
474 pub(super) fn entry(
474 pub(super) fn entry(
475 &self,
475 &self,
476 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
476 ) -> Result<Option<DirstateEntry>, DirstateV2ParseError> {
477 if self.has_entry() {
477 if self.has_entry() {
478 Ok(Some(self.assume_entry()?))
478 Ok(Some(self.assume_entry()?))
479 } else {
479 } else {
480 Ok(None)
480 Ok(None)
481 }
481 }
482 }
482 }
483
483
484 pub(super) fn children<'on_disk>(
484 pub(super) fn children<'on_disk>(
485 &self,
485 &self,
486 on_disk: &'on_disk [u8],
486 on_disk: &'on_disk [u8],
487 ) -> Result<&'on_disk [Node], DirstateV2ParseError> {
487 ) -> Result<&'on_disk [Node], DirstateV2ParseError> {
488 read_nodes(on_disk, self.children)
488 read_nodes(on_disk, self.children)
489 }
489 }
490
490
491 pub(super) fn to_in_memory_node<'on_disk>(
491 pub(super) fn to_in_memory_node<'on_disk>(
492 &self,
492 &self,
493 on_disk: &'on_disk [u8],
493 on_disk: &'on_disk [u8],
494 ) -> Result<dirstate_map::Node<'on_disk>, DirstateV2ParseError> {
494 ) -> Result<dirstate_map::Node<'on_disk>, DirstateV2ParseError> {
495 Ok(dirstate_map::Node {
495 Ok(dirstate_map::Node {
496 children: dirstate_map::ChildNodes::OnDisk(
496 children: dirstate_map::ChildNodes::OnDisk(
497 self.children(on_disk)?,
497 self.children(on_disk)?,
498 ),
498 ),
499 copy_source: self.copy_source(on_disk)?.map(Cow::Borrowed),
499 copy_source: self.copy_source(on_disk)?.map(Cow::Borrowed),
500 data: self.node_data()?,
500 data: self.node_data()?,
501 descendants_with_entry_count: self
501 descendants_with_entry_count: self
502 .descendants_with_entry_count
502 .descendants_with_entry_count
503 .get(),
503 .get(),
504 tracked_descendants_count: self.tracked_descendants_count.get(),
504 tracked_descendants_count: self.tracked_descendants_count.get(),
505 })
505 })
506 }
506 }
507
507
508 fn from_dirstate_entry(
508 fn from_dirstate_entry(
509 entry: &DirstateEntry,
509 entry: &DirstateEntry,
510 ) -> (Flags, U32Be, PackedTruncatedTimestamp) {
510 ) -> (Flags, U32Be, PackedTruncatedTimestamp) {
511 let DirstateV2Data {
511 let DirstateV2Data {
512 wc_tracked,
512 wc_tracked,
513 p1_tracked,
513 p1_tracked,
514 p2_info,
514 p2_info,
515 mode_size: mode_size_opt,
515 mode_size: mode_size_opt,
516 mtime: mtime_opt,
516 mtime: mtime_opt,
517 fallback_exec,
517 fallback_exec,
518 fallback_symlink,
518 fallback_symlink,
519 } = entry.v2_data();
519 } = entry.v2_data();
520 // TODO: convert through raw flag bits instead?
520 // TODO: convert through raw flag bits instead?
521 let mut flags = Flags::empty();
521 let mut flags = Flags::empty();
522 flags.set(Flags::WDIR_TRACKED, wc_tracked);
522 flags.set(Flags::WDIR_TRACKED, wc_tracked);
523 flags.set(Flags::P1_TRACKED, p1_tracked);
523 flags.set(Flags::P1_TRACKED, p1_tracked);
524 flags.set(Flags::P2_INFO, p2_info);
524 flags.set(Flags::P2_INFO, p2_info);
525 let size = if let Some((m, s)) = mode_size_opt {
525 let size = if let Some((m, s)) = mode_size_opt {
526 let exec_perm = m & (libc::S_IXUSR as u32) != 0;
526 let exec_perm = m & (libc::S_IXUSR as u32) != 0;
527 let is_symlink = m & (libc::S_IFMT as u32) == libc::S_IFLNK as u32;
527 let is_symlink = m & (libc::S_IFMT as u32) == libc::S_IFLNK as u32;
528 flags.set(Flags::MODE_EXEC_PERM, exec_perm);
528 flags.set(Flags::MODE_EXEC_PERM, exec_perm);
529 flags.set(Flags::MODE_IS_SYMLINK, is_symlink);
529 flags.set(Flags::MODE_IS_SYMLINK, is_symlink);
530 flags.insert(Flags::HAS_MODE_AND_SIZE);
530 flags.insert(Flags::HAS_MODE_AND_SIZE);
531 s.into()
531 s.into()
532 } else {
532 } else {
533 0.into()
533 0.into()
534 };
534 };
535 let mtime = if let Some(m) = mtime_opt {
535 let mtime = if let Some(m) = mtime_opt {
536 flags.insert(Flags::HAS_MTIME);
536 flags.insert(Flags::HAS_MTIME);
537 if m.second_ambiguous {
537 if m.second_ambiguous {
538 flags.insert(Flags::MTIME_SECOND_AMBIGUOUS);
538 flags.insert(Flags::MTIME_SECOND_AMBIGUOUS);
539 };
539 };
540 m.into()
540 m.into()
541 } else {
541 } else {
542 PackedTruncatedTimestamp::null()
542 PackedTruncatedTimestamp::null()
543 };
543 };
544 if let Some(f_exec) = fallback_exec {
544 if let Some(f_exec) = fallback_exec {
545 flags.insert(Flags::HAS_FALLBACK_EXEC);
545 flags.insert(Flags::HAS_FALLBACK_EXEC);
546 if f_exec {
546 if f_exec {
547 flags.insert(Flags::FALLBACK_EXEC);
547 flags.insert(Flags::FALLBACK_EXEC);
548 }
548 }
549 }
549 }
550 if let Some(f_symlink) = fallback_symlink {
550 if let Some(f_symlink) = fallback_symlink {
551 flags.insert(Flags::HAS_FALLBACK_SYMLINK);
551 flags.insert(Flags::HAS_FALLBACK_SYMLINK);
552 if f_symlink {
552 if f_symlink {
553 flags.insert(Flags::FALLBACK_SYMLINK);
553 flags.insert(Flags::FALLBACK_SYMLINK);
554 }
554 }
555 }
555 }
556 (flags, size, mtime)
556 (flags, size, mtime)
557 }
557 }
558 }
558 }
559
559
560 fn read_hg_path(
560 fn read_hg_path(
561 on_disk: &[u8],
561 on_disk: &[u8],
562 slice: PathSlice,
562 slice: PathSlice,
563 ) -> Result<&HgPath, DirstateV2ParseError> {
563 ) -> Result<&HgPath, DirstateV2ParseError> {
564 read_slice(on_disk, slice.start, slice.len.get()).map(HgPath::new)
564 read_slice(on_disk, slice.start, slice.len.get()).map(HgPath::new)
565 }
565 }
566
566
567 fn read_nodes(
567 fn read_nodes(
568 on_disk: &[u8],
568 on_disk: &[u8],
569 slice: ChildNodes,
569 slice: ChildNodes,
570 ) -> Result<&[Node], DirstateV2ParseError> {
570 ) -> Result<&[Node], DirstateV2ParseError> {
571 read_slice(on_disk, slice.start, slice.len.get())
571 read_slice(on_disk, slice.start, slice.len.get())
572 }
572 }
573
573
574 fn read_slice<T, Len>(
574 fn read_slice<T, Len>(
575 on_disk: &[u8],
575 on_disk: &[u8],
576 start: Offset,
576 start: Offset,
577 len: Len,
577 len: Len,
578 ) -> Result<&[T], DirstateV2ParseError>
578 ) -> Result<&[T], DirstateV2ParseError>
579 where
579 where
580 T: BytesCast,
580 T: BytesCast,
581 Len: TryInto<usize>,
581 Len: TryInto<usize>,
582 {
582 {
583 // Either value falling back to `usize::MAX` would result in an "out of bounds"
583 // Either value falling back to `usize::MAX` would result in an "out of bounds"
584 // error, since a single `&[u8]` cannot occupy the entire address space.
584 // error, since a single `&[u8]` cannot occupy the entire address space.
585 let start = start.get().try_into().unwrap_or(std::usize::MAX);
585 let start = start.get().try_into().unwrap_or(std::usize::MAX);
586 let len = len.try_into().unwrap_or(std::usize::MAX);
586 let len = len.try_into().unwrap_or(std::usize::MAX);
587 let bytes = match on_disk.get(start..) {
587 let bytes = match on_disk.get(start..) {
588 Some(bytes) => bytes,
588 Some(bytes) => bytes,
589 None => {
589 None => {
590 return Err(DirstateV2ParseError::new(
590 return Err(DirstateV2ParseError::new(
591 "not enough bytes from disk",
591 "not enough bytes from disk",
592 ))
592 ))
593 }
593 }
594 };
594 };
595 T::slice_from_bytes(bytes, len)
595 T::slice_from_bytes(bytes, len)
596 .map_err(|e| {
596 .map_err(|e| {
597 DirstateV2ParseError::new(format!("when reading a slice, {}", e))
597 DirstateV2ParseError::new(format!("when reading a slice, {}", e))
598 })
598 })
599 .map(|(slice, _rest)| slice)
599 .map(|(slice, _rest)| slice)
600 }
600 }
601
601
602 pub(crate) fn for_each_tracked_path<'on_disk>(
603 on_disk: &'on_disk [u8],
604 metadata: &[u8],
605 mut f: impl FnMut(&'on_disk HgPath),
606 ) -> Result<(), DirstateV2ParseError> {
607 let (meta, _) = TreeMetadata::from_bytes(metadata).map_err(|e| {
608 DirstateV2ParseError::new(format!("when parsing tree metadata, {}", e))
609 })?;
610 fn recur<'on_disk>(
611 on_disk: &'on_disk [u8],
612 nodes: ChildNodes,
613 f: &mut impl FnMut(&'on_disk HgPath),
614 ) -> Result<(), DirstateV2ParseError> {
615 for node in read_nodes(on_disk, nodes)? {
616 if let Some(entry) = node.entry()? {
617 if entry.tracked() {
618 f(node.full_path(on_disk)?)
619 }
620 }
621 recur(on_disk, node.children, f)?
622 }
623 Ok(())
624 }
625 recur(on_disk, meta.root_nodes, &mut f)
626 }
627
628 /// Returns new data and metadata, together with whether that data should be
602 /// Returns new data and metadata, together with whether that data should be
629 /// appended to the existing data file whose content is at
603 /// appended to the existing data file whose content is at
630 /// `dirstate_map.on_disk` (true), instead of written to a new data file
604 /// `dirstate_map.on_disk` (true), instead of written to a new data file
631 /// (false), and the previous size of data on disk.
605 /// (false), and the previous size of data on disk.
632 pub(super) fn write(
606 pub(super) fn write(
633 dirstate_map: &DirstateMap,
607 dirstate_map: &DirstateMap,
634 can_append: bool,
608 can_append: bool,
635 ) -> Result<(Vec<u8>, TreeMetadata, bool, usize), DirstateError> {
609 ) -> Result<(Vec<u8>, TreeMetadata, bool, usize), DirstateError> {
636 let append = can_append && dirstate_map.write_should_append();
610 let append = can_append && dirstate_map.write_should_append();
637
611
638 // This ignores the space for paths, and for nodes without an entry.
612 // This ignores the space for paths, and for nodes without an entry.
639 // TODO: better estimate? Skip the `Vec` and write to a file directly?
613 // TODO: better estimate? Skip the `Vec` and write to a file directly?
640 let size_guess = std::mem::size_of::<Node>()
614 let size_guess = std::mem::size_of::<Node>()
641 * dirstate_map.nodes_with_entry_count as usize;
615 * dirstate_map.nodes_with_entry_count as usize;
642
616
643 let mut writer = Writer {
617 let mut writer = Writer {
644 dirstate_map,
618 dirstate_map,
645 append,
619 append,
646 out: Vec::with_capacity(size_guess),
620 out: Vec::with_capacity(size_guess),
647 };
621 };
648
622
649 let root_nodes = writer.write_nodes(dirstate_map.root.as_ref())?;
623 let root_nodes = writer.write_nodes(dirstate_map.root.as_ref())?;
650
624
651 let unreachable_bytes = if append {
625 let unreachable_bytes = if append {
652 dirstate_map.unreachable_bytes
626 dirstate_map.unreachable_bytes
653 } else {
627 } else {
654 0
628 0
655 };
629 };
656 let meta = TreeMetadata {
630 let meta = TreeMetadata {
657 root_nodes,
631 root_nodes,
658 nodes_with_entry_count: dirstate_map.nodes_with_entry_count.into(),
632 nodes_with_entry_count: dirstate_map.nodes_with_entry_count.into(),
659 nodes_with_copy_source_count: dirstate_map
633 nodes_with_copy_source_count: dirstate_map
660 .nodes_with_copy_source_count
634 .nodes_with_copy_source_count
661 .into(),
635 .into(),
662 unreachable_bytes: unreachable_bytes.into(),
636 unreachable_bytes: unreachable_bytes.into(),
663 unused: [0; 4],
637 unused: [0; 4],
664 ignore_patterns_hash: dirstate_map.ignore_patterns_hash,
638 ignore_patterns_hash: dirstate_map.ignore_patterns_hash,
665 };
639 };
666 Ok((writer.out, meta, append, dirstate_map.old_data_size))
640 Ok((writer.out, meta, append, dirstate_map.old_data_size))
667 }
641 }
668
642
669 struct Writer<'dmap, 'on_disk> {
643 struct Writer<'dmap, 'on_disk> {
670 dirstate_map: &'dmap DirstateMap<'on_disk>,
644 dirstate_map: &'dmap DirstateMap<'on_disk>,
671 append: bool,
645 append: bool,
672 out: Vec<u8>,
646 out: Vec<u8>,
673 }
647 }
674
648
675 impl Writer<'_, '_> {
649 impl Writer<'_, '_> {
676 fn write_nodes(
650 fn write_nodes(
677 &mut self,
651 &mut self,
678 nodes: dirstate_map::ChildNodesRef,
652 nodes: dirstate_map::ChildNodesRef,
679 ) -> Result<ChildNodes, DirstateError> {
653 ) -> Result<ChildNodes, DirstateError> {
680 // Reuse already-written nodes if possible
654 // Reuse already-written nodes if possible
681 if self.append {
655 if self.append {
682 if let dirstate_map::ChildNodesRef::OnDisk(nodes_slice) = nodes {
656 if let dirstate_map::ChildNodesRef::OnDisk(nodes_slice) = nodes {
683 let start = self.on_disk_offset_of(nodes_slice).expect(
657 let start = self.on_disk_offset_of(nodes_slice).expect(
684 "dirstate-v2 OnDisk nodes not found within on_disk",
658 "dirstate-v2 OnDisk nodes not found within on_disk",
685 );
659 );
686 let len = child_nodes_len_from_usize(nodes_slice.len());
660 let len = child_nodes_len_from_usize(nodes_slice.len());
687 return Ok(ChildNodes { start, len });
661 return Ok(ChildNodes { start, len });
688 }
662 }
689 }
663 }
690
664
691 // `dirstate_map::ChildNodes::InMemory` contains a `HashMap` which has
665 // `dirstate_map::ChildNodes::InMemory` contains a `HashMap` which has
692 // undefined iteration order. Sort to enable binary search in the
666 // undefined iteration order. Sort to enable binary search in the
693 // written file.
667 // written file.
694 let nodes = nodes.sorted();
668 let nodes = nodes.sorted();
695 let nodes_len = nodes.len();
669 let nodes_len = nodes.len();
696
670
697 // First accumulate serialized nodes in a `Vec`
671 // First accumulate serialized nodes in a `Vec`
698 let mut on_disk_nodes = Vec::with_capacity(nodes_len);
672 let mut on_disk_nodes = Vec::with_capacity(nodes_len);
699 for node in nodes {
673 for node in nodes {
700 let children =
674 let children =
701 self.write_nodes(node.children(self.dirstate_map.on_disk)?)?;
675 self.write_nodes(node.children(self.dirstate_map.on_disk)?)?;
702 let full_path = node.full_path(self.dirstate_map.on_disk)?;
676 let full_path = node.full_path(self.dirstate_map.on_disk)?;
703 let full_path = self.write_path(full_path.as_bytes());
677 let full_path = self.write_path(full_path.as_bytes());
704 let copy_source = if let Some(source) =
678 let copy_source = if let Some(source) =
705 node.copy_source(self.dirstate_map.on_disk)?
679 node.copy_source(self.dirstate_map.on_disk)?
706 {
680 {
707 self.write_path(source.as_bytes())
681 self.write_path(source.as_bytes())
708 } else {
682 } else {
709 PathSlice {
683 PathSlice {
710 start: 0.into(),
684 start: 0.into(),
711 len: 0.into(),
685 len: 0.into(),
712 }
686 }
713 };
687 };
714 on_disk_nodes.push(match node {
688 on_disk_nodes.push(match node {
715 NodeRef::InMemory(path, node) => {
689 NodeRef::InMemory(path, node) => {
716 let (flags, size, mtime) = match &node.data {
690 let (flags, size, mtime) = match &node.data {
717 dirstate_map::NodeData::Entry(entry) => {
691 dirstate_map::NodeData::Entry(entry) => {
718 Node::from_dirstate_entry(entry)
692 Node::from_dirstate_entry(entry)
719 }
693 }
720 dirstate_map::NodeData::CachedDirectory { mtime } => {
694 dirstate_map::NodeData::CachedDirectory { mtime } => {
721 // We currently never set an mtime if unknown files
695 // We currently never set an mtime if unknown files
722 // are present.
696 // are present.
723 // So if we have an mtime for a directory, we know
697 // So if we have an mtime for a directory, we know
724 // there are no unknown
698 // there are no unknown
725 // files and we
699 // files and we
726 // blindly set ALL_UNKNOWN_RECORDED.
700 // blindly set ALL_UNKNOWN_RECORDED.
727 //
701 //
728 // We never set ALL_IGNORED_RECORDED since we
702 // We never set ALL_IGNORED_RECORDED since we
729 // don't track that case
703 // don't track that case
730 // currently.
704 // currently.
731 let mut flags = Flags::DIRECTORY
705 let mut flags = Flags::DIRECTORY
732 | Flags::HAS_MTIME
706 | Flags::HAS_MTIME
733 | Flags::ALL_UNKNOWN_RECORDED;
707 | Flags::ALL_UNKNOWN_RECORDED;
734 if mtime.second_ambiguous {
708 if mtime.second_ambiguous {
735 flags.insert(Flags::MTIME_SECOND_AMBIGUOUS)
709 flags.insert(Flags::MTIME_SECOND_AMBIGUOUS)
736 }
710 }
737 (flags, 0.into(), (*mtime).into())
711 (flags, 0.into(), (*mtime).into())
738 }
712 }
739 dirstate_map::NodeData::None => (
713 dirstate_map::NodeData::None => (
740 Flags::DIRECTORY,
714 Flags::DIRECTORY,
741 0.into(),
715 0.into(),
742 PackedTruncatedTimestamp::null(),
716 PackedTruncatedTimestamp::null(),
743 ),
717 ),
744 };
718 };
745 Node {
719 Node {
746 children,
720 children,
747 copy_source,
721 copy_source,
748 full_path,
722 full_path,
749 base_name_start: u16::try_from(path.base_name_start())
723 base_name_start: u16::try_from(path.base_name_start())
750 // Could only panic for paths over 64 KiB
724 // Could only panic for paths over 64 KiB
751 .expect("dirstate-v2 path length overflow")
725 .expect("dirstate-v2 path length overflow")
752 .into(),
726 .into(),
753 descendants_with_entry_count: node
727 descendants_with_entry_count: node
754 .descendants_with_entry_count
728 .descendants_with_entry_count
755 .into(),
729 .into(),
756 tracked_descendants_count: node
730 tracked_descendants_count: node
757 .tracked_descendants_count
731 .tracked_descendants_count
758 .into(),
732 .into(),
759 flags: flags.bits().into(),
733 flags: flags.bits().into(),
760 size,
734 size,
761 mtime,
735 mtime,
762 }
736 }
763 }
737 }
764 NodeRef::OnDisk(node) => Node {
738 NodeRef::OnDisk(node) => Node {
765 children,
739 children,
766 copy_source,
740 copy_source,
767 full_path,
741 full_path,
768 ..*node
742 ..*node
769 },
743 },
770 })
744 })
771 }
745 }
772 // … so we can write them contiguously, after writing everything else
746 // … so we can write them contiguously, after writing everything else
773 // they refer to.
747 // they refer to.
774 let start = self.current_offset();
748 let start = self.current_offset();
775 let len = child_nodes_len_from_usize(nodes_len);
749 let len = child_nodes_len_from_usize(nodes_len);
776 self.out.extend(on_disk_nodes.as_bytes());
750 self.out.extend(on_disk_nodes.as_bytes());
777 Ok(ChildNodes { start, len })
751 Ok(ChildNodes { start, len })
778 }
752 }
779
753
780 /// If the given slice of items is within `on_disk`, returns its offset
754 /// If the given slice of items is within `on_disk`, returns its offset
781 /// from the start of `on_disk`.
755 /// from the start of `on_disk`.
782 fn on_disk_offset_of<T>(&self, slice: &[T]) -> Option<Offset>
756 fn on_disk_offset_of<T>(&self, slice: &[T]) -> Option<Offset>
783 where
757 where
784 T: BytesCast,
758 T: BytesCast,
785 {
759 {
786 fn address_range(slice: &[u8]) -> std::ops::RangeInclusive<usize> {
760 fn address_range(slice: &[u8]) -> std::ops::RangeInclusive<usize> {
787 let start = slice.as_ptr() as usize;
761 let start = slice.as_ptr() as usize;
788 let end = start + slice.len();
762 let end = start + slice.len();
789 start..=end
763 start..=end
790 }
764 }
791 let slice_addresses = address_range(slice.as_bytes());
765 let slice_addresses = address_range(slice.as_bytes());
792 let on_disk_addresses = address_range(self.dirstate_map.on_disk);
766 let on_disk_addresses = address_range(self.dirstate_map.on_disk);
793 if on_disk_addresses.contains(slice_addresses.start())
767 if on_disk_addresses.contains(slice_addresses.start())
794 && on_disk_addresses.contains(slice_addresses.end())
768 && on_disk_addresses.contains(slice_addresses.end())
795 {
769 {
796 let offset = slice_addresses.start() - on_disk_addresses.start();
770 let offset = slice_addresses.start() - on_disk_addresses.start();
797 Some(offset_from_usize(offset))
771 Some(offset_from_usize(offset))
798 } else {
772 } else {
799 None
773 None
800 }
774 }
801 }
775 }
802
776
803 fn current_offset(&mut self) -> Offset {
777 fn current_offset(&mut self) -> Offset {
804 let mut offset = self.out.len();
778 let mut offset = self.out.len();
805 if self.append {
779 if self.append {
806 offset += self.dirstate_map.on_disk.len()
780 offset += self.dirstate_map.on_disk.len()
807 }
781 }
808 offset_from_usize(offset)
782 offset_from_usize(offset)
809 }
783 }
810
784
811 fn write_path(&mut self, slice: &[u8]) -> PathSlice {
785 fn write_path(&mut self, slice: &[u8]) -> PathSlice {
812 let len = path_len_from_usize(slice.len());
786 let len = path_len_from_usize(slice.len());
813 // Reuse an already-written path if possible
787 // Reuse an already-written path if possible
814 if self.append {
788 if self.append {
815 if let Some(start) = self.on_disk_offset_of(slice) {
789 if let Some(start) = self.on_disk_offset_of(slice) {
816 return PathSlice { start, len };
790 return PathSlice { start, len };
817 }
791 }
818 }
792 }
819 let start = self.current_offset();
793 let start = self.current_offset();
820 self.out.extend(slice.as_bytes());
794 self.out.extend(slice.as_bytes());
821 PathSlice { start, len }
795 PathSlice { start, len }
822 }
796 }
823 }
797 }
824
798
825 fn offset_from_usize(x: usize) -> Offset {
799 fn offset_from_usize(x: usize) -> Offset {
826 u32::try_from(x)
800 u32::try_from(x)
827 // Could only panic for a dirstate file larger than 4 GiB
801 // Could only panic for a dirstate file larger than 4 GiB
828 .expect("dirstate-v2 offset overflow")
802 .expect("dirstate-v2 offset overflow")
829 .into()
803 .into()
830 }
804 }
831
805
832 fn child_nodes_len_from_usize(x: usize) -> Size {
806 fn child_nodes_len_from_usize(x: usize) -> Size {
833 u32::try_from(x)
807 u32::try_from(x)
834 // Could only panic with over 4 billion nodes
808 // Could only panic with over 4 billion nodes
835 .expect("dirstate-v2 slice length overflow")
809 .expect("dirstate-v2 slice length overflow")
836 .into()
810 .into()
837 }
811 }
838
812
839 fn path_len_from_usize(x: usize) -> PathSize {
813 fn path_len_from_usize(x: usize) -> PathSize {
840 u16::try_from(x)
814 u16::try_from(x)
841 // Could only panic for paths over 64 KiB
815 // Could only panic for paths over 64 KiB
842 .expect("dirstate-v2 path length overflow")
816 .expect("dirstate-v2 path length overflow")
843 .into()
817 .into()
844 }
818 }
845
819
846 impl From<TruncatedTimestamp> for PackedTruncatedTimestamp {
820 impl From<TruncatedTimestamp> for PackedTruncatedTimestamp {
847 fn from(timestamp: TruncatedTimestamp) -> Self {
821 fn from(timestamp: TruncatedTimestamp) -> Self {
848 Self {
822 Self {
849 truncated_seconds: timestamp.truncated_seconds().into(),
823 truncated_seconds: timestamp.truncated_seconds().into(),
850 nanoseconds: timestamp.nanoseconds().into(),
824 nanoseconds: timestamp.nanoseconds().into(),
851 }
825 }
852 }
826 }
853 }
827 }
854
828
855 impl TryFrom<PackedTruncatedTimestamp> for TruncatedTimestamp {
829 impl TryFrom<PackedTruncatedTimestamp> for TruncatedTimestamp {
856 type Error = DirstateV2ParseError;
830 type Error = DirstateV2ParseError;
857
831
858 fn try_from(
832 fn try_from(
859 timestamp: PackedTruncatedTimestamp,
833 timestamp: PackedTruncatedTimestamp,
860 ) -> Result<Self, Self::Error> {
834 ) -> Result<Self, Self::Error> {
861 Self::from_already_truncated(
835 Self::from_already_truncated(
862 timestamp.truncated_seconds.get(),
836 timestamp.truncated_seconds.get(),
863 timestamp.nanoseconds.get(),
837 timestamp.nanoseconds.get(),
864 false,
838 false,
865 )
839 )
866 }
840 }
867 }
841 }
868 impl PackedTruncatedTimestamp {
842 impl PackedTruncatedTimestamp {
869 fn null() -> Self {
843 fn null() -> Self {
870 Self {
844 Self {
871 truncated_seconds: 0.into(),
845 truncated_seconds: 0.into(),
872 nanoseconds: 0.into(),
846 nanoseconds: 0.into(),
873 }
847 }
874 }
848 }
875 }
849 }
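The docket layout above (format marker, padded parents, tree metadata, data size, uuid) is easiest to follow with the read path spelled out end to end. Below is a minimal, crate-internal sketch: it assumes the code sits inside the `dirstate_tree` module tree so that the `pub(super)` `read` function is reachable, and it uses plain `std::fs` reads where real callers go through the repository `Vfs` (as the removed `Dirstate::new` helper in the next file did).

// Illustrative sketch only: locate and load the dirstate-v2 data file via the
// docket. Real code reads through the repo's Vfs and handles I/O errors.
use std::path::Path;

fn load_dirstate_v2(dot_hg: &Path) -> Result<(), DirstateV2ParseError> {
    // `.hg/dirstate` holds the small docket, not the tree data itself.
    let docket_bytes =
        std::fs::read(dot_hg.join("dirstate")).expect("illustrative only");
    let docket = read_docket(&docket_bytes)?;
    // The docket names the data file (`dirstate.{uuid}`) and says how many of
    // its bytes are meaningful; anything past `data_size()` is not part of
    // this docket's snapshot.
    let data_path = dot_hg.join(docket.data_filename());
    let mut data = std::fs::read(&data_path).expect("illustrative only");
    data.truncate(docket.data_size());
    // `read` combines the data bytes with the docket's tree metadata into a
    // `DirstateMap`.
    let _map = read(&data, docket.tree_metadata())?;
    Ok(())
}

The docket indirection is what allows writers to append to the data file and then rewrite only the small docket, instead of rewriting the whole tree on every change (see `can_append` / `write_should_append` above).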
@@ -1,82 +1,29 b''
1 // list_tracked_files.rs
1 // list_tracked_files.rs
2 //
2 //
3 // Copyright 2020 Antoine Cezar <antoine.cezar@octobus.net>
3 // Copyright 2020 Antoine Cezar <antoine.cezar@octobus.net>
4 //
4 //
5 // This software may be used and distributed according to the terms of the
5 // This software may be used and distributed according to the terms of the
6 // GNU General Public License version 2 or any later version.
6 // GNU General Public License version 2 or any later version.
7
7
8 use crate::dirstate::parsers::parse_dirstate_entries;
9 use crate::dirstate_tree::on_disk::{for_each_tracked_path, read_docket};
10 use crate::errors::HgError;
8 use crate::errors::HgError;
11 use crate::repo::Repo;
9 use crate::repo::Repo;
12 use crate::revlog::manifest::Manifest;
10 use crate::revlog::manifest::Manifest;
13 use crate::revlog::RevlogError;
11 use crate::revlog::RevlogError;
14 use crate::utils::hg_path::HgPath;
12 use crate::utils::hg_path::HgPath;
15 use crate::DirstateError;
16 use rayon::prelude::*;
17
18 /// List files under Mercurial control in the working directory
19 /// by reading the dirstate
20 pub struct Dirstate {
21 /// The `dirstate` content.
22 content: Vec<u8>,
23 v2_metadata: Option<Vec<u8>>,
24 }
25
26 impl Dirstate {
27 pub fn new(repo: &Repo) -> Result<Self, HgError> {
28 let mut content = repo.hg_vfs().read("dirstate")?;
29 let v2_metadata = if repo.has_dirstate_v2() {
30 let docket = read_docket(&content)?;
31 let meta = docket.tree_metadata().to_vec();
32 content = repo.hg_vfs().read(docket.data_filename())?;
33 Some(meta)
34 } else {
35 None
36 };
37 Ok(Self {
38 content,
39 v2_metadata,
40 })
41 }
42
43 pub fn tracked_files(&self) -> Result<Vec<&HgPath>, DirstateError> {
44 let mut files = Vec::new();
45 if !self.content.is_empty() {
46 if let Some(meta) = &self.v2_metadata {
47 for_each_tracked_path(&self.content, meta, |path| {
48 files.push(path)
49 })?
50 } else {
51 let _parents = parse_dirstate_entries(
52 &self.content,
53 |path, entry, _copy_source| {
54 if entry.tracked() {
55 files.push(path)
56 }
57 Ok(())
58 },
59 )?;
60 }
61 }
62 files.par_sort_unstable();
63 Ok(files)
64 }
65 }
66
13
67 /// List files under Mercurial control at a given revision.
14 /// List files under Mercurial control at a given revision.
68 pub fn list_rev_tracked_files(
15 pub fn list_rev_tracked_files(
69 repo: &Repo,
16 repo: &Repo,
70 revset: &str,
17 revset: &str,
71 ) -> Result<FilesForRev, RevlogError> {
18 ) -> Result<FilesForRev, RevlogError> {
72 let rev = crate::revset::resolve_single(revset, repo)?;
19 let rev = crate::revset::resolve_single(revset, repo)?;
73 Ok(FilesForRev(repo.manifest_for_rev(rev)?))
20 Ok(FilesForRev(repo.manifest_for_rev(rev)?))
74 }
21 }
75
22
76 pub struct FilesForRev(Manifest);
23 pub struct FilesForRev(Manifest);
77
24
78 impl FilesForRev {
25 impl FilesForRev {
79 pub fn iter(&self) -> impl Iterator<Item = Result<&HgPath, HgError>> {
26 pub fn iter(&self) -> impl Iterator<Item = Result<&HgPath, HgError>> {
80 self.0.iter().map(|entry| Ok(entry?.path))
27 self.0.iter().map(|entry| Ok(entry?.path))
81 }
28 }
82 }
29 }
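With the dirstate-reading half of this file removed, only the revision-based listing remains. The following is a hedged usage sketch mirroring how `rhg files -r REV` (further below) consumes it; the `"."` revset and the lossy UTF-8 printing are illustrative choices, not part of the API.

use hg::operations::list_rev_tracked_files;
use hg::repo::Repo;
use hg::revlog::RevlogError;

// Print every file tracked in the working directory's parent revision.
fn print_tracked_at_dot(repo: &Repo) -> Result<(), RevlogError> {
    let files = list_rev_tracked_files(repo, ".")?;
    for entry in files.iter() {
        // Each item is a `Result<&HgPath, HgError>` read from the manifest of
        // the resolved revision.
        match entry {
            Ok(path) => {
                println!("{}", String::from_utf8_lossy(path.as_bytes()))
            }
            Err(_) => eprintln!("error reading a manifest entry"),
        }
    }
    Ok(())
}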
@@ -1,11 +1,10 b''
1 //! A distinction is made between operations and commands.
1 //! A distinction is made between operations and commands.
2 //! An operation is what can be done whereas a command is what is exposed by
2 //! An operation is what can be done whereas a command is what is exposed by
3 //! the cli. A single command can use several operations to achieve its goal.
3 //! the cli. A single command can use several operations to achieve its goal.
4
4
5 mod cat;
5 mod cat;
6 mod debugdata;
6 mod debugdata;
7 mod list_tracked_files;
7 mod list_tracked_files;
8 pub use cat::{cat, CatOutput};
8 pub use cat::{cat, CatOutput};
9 pub use debugdata::{debug_data, DebugDataKind};
9 pub use debugdata::{debug_data, DebugDataKind};
10 pub use list_tracked_files::Dirstate;
11 pub use list_tracked_files::{list_rev_tracked_files, FilesForRev};
10 pub use list_tracked_files::{list_rev_tracked_files, FilesForRev};
@@ -1,101 +1,110 b''
1 use crate::error::CommandError;
1 use crate::error::CommandError;
2 use crate::ui::Ui;
2 use crate::ui::Ui;
3 use crate::utils::path_utils::RelativizePaths;
3 use crate::utils::path_utils::RelativizePaths;
4 use clap::Arg;
4 use clap::Arg;
5 use hg::errors::HgError;
5 use hg::errors::HgError;
6 use hg::operations::list_rev_tracked_files;
6 use hg::operations::list_rev_tracked_files;
7 use hg::operations::Dirstate;
8 use hg::repo::Repo;
7 use hg::repo::Repo;
8 use hg::utils::filter_map_results;
9 use hg::utils::hg_path::HgPath;
9 use hg::utils::hg_path::HgPath;
10 use rayon::prelude::*;
10
11
11 pub const HELP_TEXT: &str = "
12 pub const HELP_TEXT: &str = "
12 List tracked files.
13 List tracked files.
13
14
14 Returns 0 on success.
15 Returns 0 on success.
15 ";
16 ";
16
17
17 pub fn args() -> clap::Command {
18 pub fn args() -> clap::Command {
18 clap::command!("files")
19 clap::command!("files")
19 .arg(
20 .arg(
20 Arg::new("rev")
21 Arg::new("rev")
21 .help("search the repository as it is in REV")
22 .help("search the repository as it is in REV")
22 .short('r')
23 .short('r')
23 .long("revision")
24 .long("revision")
24 .value_name("REV"),
25 .value_name("REV"),
25 )
26 )
26 .about(HELP_TEXT)
27 .about(HELP_TEXT)
27 }
28 }
28
29
29 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
30 pub fn run(invocation: &crate::CliInvocation) -> Result<(), CommandError> {
30 let relative = invocation.config.get(b"ui", b"relative-paths");
31 let relative = invocation.config.get(b"ui", b"relative-paths");
31 if relative.is_some() {
32 if relative.is_some() {
32 return Err(CommandError::unsupported(
33 return Err(CommandError::unsupported(
33 "non-default ui.relative-paths",
34 "non-default ui.relative-paths",
34 ));
35 ));
35 }
36 }
36
37
37 let rev = invocation.subcommand_args.get_one::<String>("rev");
38 let rev = invocation.subcommand_args.get_one::<String>("rev");
38
39
39 let repo = invocation.repo?;
40 let repo = invocation.repo?;
40
41
41 // It seems better if this check is removed: this would correspond to
42 // It seems better if this check is removed: this would correspond to
42 // automatically enabling the extension if the repo requires it.
43 // automatically enabling the extension if the repo requires it.
43 // However we need this check to be in sync with vanilla hg so hg tests
44 // However we need this check to be in sync with vanilla hg so hg tests
44 // pass.
45 // pass.
45 if repo.has_sparse()
46 if repo.has_sparse()
46 && invocation.config.get(b"extensions", b"sparse").is_none()
47 && invocation.config.get(b"extensions", b"sparse").is_none()
47 {
48 {
48 return Err(CommandError::unsupported(
49 return Err(CommandError::unsupported(
49 "repo is using sparse, but sparse extension is not enabled",
50 "repo is using sparse, but sparse extension is not enabled",
50 ));
51 ));
51 }
52 }
52
53
53 if let Some(rev) = rev {
54 if let Some(rev) = rev {
54 if repo.has_narrow() {
55 if repo.has_narrow() {
55 return Err(CommandError::unsupported(
56 return Err(CommandError::unsupported(
56 "rhg files -r <rev> is not supported in narrow clones",
57 "rhg files -r <rev> is not supported in narrow clones",
57 ));
58 ));
58 }
59 }
59 let files = list_rev_tracked_files(repo, rev)
60 let files = list_rev_tracked_files(repo, rev)
60 .map_err(|e| (e, rev.as_ref()))?;
61 .map_err(|e| (e, rev.as_ref()))?;
61 display_files(invocation.ui, repo, files.iter())
62 display_files(invocation.ui, repo, files.iter())
62 } else {
63 } else {
63 // The dirstate always reflects the sparse narrowspec, so if
64 // The dirstate always reflects the sparse narrowspec, so if
64 // we only have sparse without narrow all is fine.
65 // we only have sparse without narrow all is fine.
65 // If we have narrow, then [hg files] needs to check if
66 // If we have narrow, then [hg files] needs to check if
66 // the store narrowspec is in sync with that of the dirstate,
67 // the store narrowspec is in sync with that of the dirstate,
67 // so we can't support that without explicit code.
68 // so we can't support that without explicit code.
68 if repo.has_narrow() {
69 if repo.has_narrow() {
69 return Err(CommandError::unsupported(
70 return Err(CommandError::unsupported(
70 "rhg files is not supported in narrow clones",
71 "rhg files is not supported in narrow clones",
71 ));
72 ));
72 }
73 }
73 let dirstate = Dirstate::new(repo)?;
74 let dirstate = repo.dirstate_map()?;
74 let files = dirstate.tracked_files()?;
75 let files_res: Result<Vec<_>, _> =
76 filter_map_results(dirstate.iter(), |(path, entry)| {
77 Ok(if entry.tracked() { Some(path) } else { None })
78 })
79 .collect();
80
81 let mut files = files_res?;
82 files.par_sort_unstable();
83
75 display_files(invocation.ui, repo, files.into_iter().map(Ok))
84 display_files(invocation.ui, repo, files.into_iter().map(Ok))
76 }
85 }
77 }
86 }
78
87
79 fn display_files<'a>(
88 fn display_files<'a>(
80 ui: &Ui,
89 ui: &Ui,
81 repo: &Repo,
90 repo: &Repo,
82 files: impl IntoIterator<Item = Result<&'a HgPath, HgError>>,
91 files: impl IntoIterator<Item = Result<&'a HgPath, HgError>>,
83 ) -> Result<(), CommandError> {
92 ) -> Result<(), CommandError> {
84 let mut stdout = ui.stdout_buffer();
93 let mut stdout = ui.stdout_buffer();
85 let mut any = false;
94 let mut any = false;
86
95
87 let relativize = RelativizePaths::new(repo)?;
96 let relativize = RelativizePaths::new(repo)?;
88 for result in files {
97 for result in files {
89 let path = result?;
98 let path = result?;
90 stdout.write_all(&relativize.relativize(path))?;
99 stdout.write_all(&relativize.relativize(path))?;
91 stdout.write_all(b"\n")?;
100 stdout.write_all(b"\n")?;
92 any = true;
101 any = true;
93 }
102 }
94
103
95 stdout.flush()?;
104 stdout.flush()?;
96 if any {
105 if any {
97 Ok(())
106 Ok(())
98 } else {
107 } else {
99 Err(CommandError::Unsuccessful)
108 Err(CommandError::Unsuccessful)
100 }
109 }
101 }
110 }