##// END OF EJS Templates
rust-dirstate: rename `has_dirstate_v2` to `use_dirstate_v2`...
Raphaël Gomès -
r51551:1e2c6cda stable
parent child Browse files
Show More
@@ -1,736 +1,745 b''
1 use crate::changelog::Changelog;
1 use crate::changelog::Changelog;
2 use crate::config::{Config, ConfigError, ConfigParseError};
2 use crate::config::{Config, ConfigError, ConfigParseError};
3 use crate::dirstate::DirstateParents;
3 use crate::dirstate::DirstateParents;
4 use crate::dirstate_tree::dirstate_map::DirstateMapWriteMode;
4 use crate::dirstate_tree::dirstate_map::DirstateMapWriteMode;
5 use crate::dirstate_tree::on_disk::Docket as DirstateDocket;
5 use crate::dirstate_tree::on_disk::Docket as DirstateDocket;
6 use crate::dirstate_tree::owning::OwningDirstateMap;
6 use crate::dirstate_tree::owning::OwningDirstateMap;
7 use crate::errors::HgResultExt;
7 use crate::errors::HgResultExt;
8 use crate::errors::{HgError, IoResultExt};
8 use crate::errors::{HgError, IoResultExt};
9 use crate::lock::{try_with_lock_no_wait, LockError};
9 use crate::lock::{try_with_lock_no_wait, LockError};
10 use crate::manifest::{Manifest, Manifestlog};
10 use crate::manifest::{Manifest, Manifestlog};
11 use crate::revlog::filelog::Filelog;
11 use crate::revlog::filelog::Filelog;
12 use crate::revlog::RevlogError;
12 use crate::revlog::RevlogError;
13 use crate::utils::debug::debug_wait_for_file_or_print;
13 use crate::utils::debug::debug_wait_for_file_or_print;
14 use crate::utils::files::get_path_from_bytes;
14 use crate::utils::files::get_path_from_bytes;
15 use crate::utils::hg_path::HgPath;
15 use crate::utils::hg_path::HgPath;
16 use crate::utils::SliceExt;
16 use crate::utils::SliceExt;
17 use crate::vfs::{is_dir, is_file, Vfs};
17 use crate::vfs::{is_dir, is_file, Vfs};
18 use crate::{requirements, NodePrefix};
18 use crate::{requirements, NodePrefix};
19 use crate::{DirstateError, Revision};
19 use crate::{DirstateError, Revision};
20 use std::cell::{Ref, RefCell, RefMut};
20 use std::cell::{Ref, RefCell, RefMut};
21 use std::collections::HashSet;
21 use std::collections::HashSet;
22 use std::io::Seek;
22 use std::io::Seek;
23 use std::io::SeekFrom;
23 use std::io::SeekFrom;
24 use std::io::Write as IoWrite;
24 use std::io::Write as IoWrite;
25 use std::path::{Path, PathBuf};
25 use std::path::{Path, PathBuf};
26
26
/// How many times to retry reading the racy dirstate-v2 docket + data file
/// pair before giving up (see `new_dirstate_map`).
const V2_MAX_READ_ATTEMPTS: usize = 5;

/// (dirstate file inode, dirstate-v2 data file uuid, data file size).
/// See `get_dirstate_data_file_integrity`.
type DirstateMapIdentity = (Option<u64>, Option<Vec<u8>>, usize);
30
30
31 /// A repository on disk
31 /// A repository on disk
32 pub struct Repo {
32 pub struct Repo {
33 working_directory: PathBuf,
33 working_directory: PathBuf,
34 dot_hg: PathBuf,
34 dot_hg: PathBuf,
35 store: PathBuf,
35 store: PathBuf,
36 requirements: HashSet<String>,
36 requirements: HashSet<String>,
37 config: Config,
37 config: Config,
38 dirstate_parents: LazyCell<DirstateParents>,
38 dirstate_parents: LazyCell<DirstateParents>,
39 dirstate_map: LazyCell<OwningDirstateMap>,
39 dirstate_map: LazyCell<OwningDirstateMap>,
40 changelog: LazyCell<Changelog>,
40 changelog: LazyCell<Changelog>,
41 manifestlog: LazyCell<Manifestlog>,
41 manifestlog: LazyCell<Manifestlog>,
42 }
42 }
43
43
44 #[derive(Debug, derive_more::From)]
44 #[derive(Debug, derive_more::From)]
45 pub enum RepoError {
45 pub enum RepoError {
46 NotFound {
46 NotFound {
47 at: PathBuf,
47 at: PathBuf,
48 },
48 },
49 #[from]
49 #[from]
50 ConfigParseError(ConfigParseError),
50 ConfigParseError(ConfigParseError),
51 #[from]
51 #[from]
52 Other(HgError),
52 Other(HgError),
53 }
53 }
54
54
55 impl From<ConfigError> for RepoError {
55 impl From<ConfigError> for RepoError {
56 fn from(error: ConfigError) -> Self {
56 fn from(error: ConfigError) -> Self {
57 match error {
57 match error {
58 ConfigError::Parse(error) => error.into(),
58 ConfigError::Parse(error) => error.into(),
59 ConfigError::Other(error) => error.into(),
59 ConfigError::Other(error) => error.into(),
60 }
60 }
61 }
61 }
62 }
62 }
63
63
64 impl Repo {
64 impl Repo {
65 /// tries to find nearest repository root in current working directory or
65 /// tries to find nearest repository root in current working directory or
66 /// its ancestors
66 /// its ancestors
67 pub fn find_repo_root() -> Result<PathBuf, RepoError> {
67 pub fn find_repo_root() -> Result<PathBuf, RepoError> {
68 let current_directory = crate::utils::current_dir()?;
68 let current_directory = crate::utils::current_dir()?;
69 // ancestors() is inclusive: it first yields `current_directory`
69 // ancestors() is inclusive: it first yields `current_directory`
70 // as-is.
70 // as-is.
71 for ancestor in current_directory.ancestors() {
71 for ancestor in current_directory.ancestors() {
72 if is_dir(ancestor.join(".hg"))? {
72 if is_dir(ancestor.join(".hg"))? {
73 return Ok(ancestor.to_path_buf());
73 return Ok(ancestor.to_path_buf());
74 }
74 }
75 }
75 }
76 Err(RepoError::NotFound {
76 Err(RepoError::NotFound {
77 at: current_directory,
77 at: current_directory,
78 })
78 })
79 }
79 }
80
80
81 /// Find a repository, either at the given path (which must contain a `.hg`
81 /// Find a repository, either at the given path (which must contain a `.hg`
82 /// sub-directory) or by searching the current directory and its
82 /// sub-directory) or by searching the current directory and its
83 /// ancestors.
83 /// ancestors.
84 ///
84 ///
85 /// A method with two very different "modes" like this usually a code smell
85 /// A method with two very different "modes" like this usually a code smell
86 /// to make two methods instead, but in this case an `Option` is what rhg
86 /// to make two methods instead, but in this case an `Option` is what rhg
87 /// sub-commands get from Clap for the `-R` / `--repository` CLI argument.
87 /// sub-commands get from Clap for the `-R` / `--repository` CLI argument.
88 /// Having two methods would just move that `if` to almost all callers.
88 /// Having two methods would just move that `if` to almost all callers.
89 pub fn find(
89 pub fn find(
90 config: &Config,
90 config: &Config,
91 explicit_path: Option<PathBuf>,
91 explicit_path: Option<PathBuf>,
92 ) -> Result<Self, RepoError> {
92 ) -> Result<Self, RepoError> {
93 if let Some(root) = explicit_path {
93 if let Some(root) = explicit_path {
94 if is_dir(root.join(".hg"))? {
94 if is_dir(root.join(".hg"))? {
95 Self::new_at_path(root, config)
95 Self::new_at_path(root, config)
96 } else if is_file(&root)? {
96 } else if is_file(&root)? {
97 Err(HgError::unsupported("bundle repository").into())
97 Err(HgError::unsupported("bundle repository").into())
98 } else {
98 } else {
99 Err(RepoError::NotFound { at: root })
99 Err(RepoError::NotFound { at: root })
100 }
100 }
101 } else {
101 } else {
102 let root = Self::find_repo_root()?;
102 let root = Self::find_repo_root()?;
103 Self::new_at_path(root, config)
103 Self::new_at_path(root, config)
104 }
104 }
105 }
105 }
106
106
107 /// To be called after checking that `.hg` is a sub-directory
107 /// To be called after checking that `.hg` is a sub-directory
108 fn new_at_path(
108 fn new_at_path(
109 working_directory: PathBuf,
109 working_directory: PathBuf,
110 config: &Config,
110 config: &Config,
111 ) -> Result<Self, RepoError> {
111 ) -> Result<Self, RepoError> {
112 let dot_hg = working_directory.join(".hg");
112 let dot_hg = working_directory.join(".hg");
113
113
114 let mut repo_config_files =
114 let mut repo_config_files =
115 vec![dot_hg.join("hgrc"), dot_hg.join("hgrc-not-shared")];
115 vec![dot_hg.join("hgrc"), dot_hg.join("hgrc-not-shared")];
116
116
117 let hg_vfs = Vfs { base: &dot_hg };
117 let hg_vfs = Vfs { base: &dot_hg };
118 let mut reqs = requirements::load_if_exists(hg_vfs)?;
118 let mut reqs = requirements::load_if_exists(hg_vfs)?;
119 let relative =
119 let relative =
120 reqs.contains(requirements::RELATIVE_SHARED_REQUIREMENT);
120 reqs.contains(requirements::RELATIVE_SHARED_REQUIREMENT);
121 let shared =
121 let shared =
122 reqs.contains(requirements::SHARED_REQUIREMENT) || relative;
122 reqs.contains(requirements::SHARED_REQUIREMENT) || relative;
123
123
124 // From `mercurial/localrepo.py`:
124 // From `mercurial/localrepo.py`:
125 //
125 //
126 // if .hg/requires contains the sharesafe requirement, it means
126 // if .hg/requires contains the sharesafe requirement, it means
127 // there exists a `.hg/store/requires` too and we should read it
127 // there exists a `.hg/store/requires` too and we should read it
128 // NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
128 // NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
129 // is present. We never write SHARESAFE_REQUIREMENT for a repo if store
129 // is present. We never write SHARESAFE_REQUIREMENT for a repo if store
130 // is not present, refer checkrequirementscompat() for that
130 // is not present, refer checkrequirementscompat() for that
131 //
131 //
132 // However, if SHARESAFE_REQUIREMENT is not present, it means that the
132 // However, if SHARESAFE_REQUIREMENT is not present, it means that the
133 // repository was shared the old way. We check the share source
133 // repository was shared the old way. We check the share source
134 // .hg/requires for SHARESAFE_REQUIREMENT to detect whether the
134 // .hg/requires for SHARESAFE_REQUIREMENT to detect whether the
135 // current repository needs to be reshared
135 // current repository needs to be reshared
136 let share_safe = reqs.contains(requirements::SHARESAFE_REQUIREMENT);
136 let share_safe = reqs.contains(requirements::SHARESAFE_REQUIREMENT);
137
137
138 let store_path;
138 let store_path;
139 if !shared {
139 if !shared {
140 store_path = dot_hg.join("store");
140 store_path = dot_hg.join("store");
141 } else {
141 } else {
142 let bytes = hg_vfs.read("sharedpath")?;
142 let bytes = hg_vfs.read("sharedpath")?;
143 let mut shared_path =
143 let mut shared_path =
144 get_path_from_bytes(bytes.trim_end_matches(|b| b == b'\n'))
144 get_path_from_bytes(bytes.trim_end_matches(|b| b == b'\n'))
145 .to_owned();
145 .to_owned();
146 if relative {
146 if relative {
147 shared_path = dot_hg.join(shared_path)
147 shared_path = dot_hg.join(shared_path)
148 }
148 }
149 if !is_dir(&shared_path)? {
149 if !is_dir(&shared_path)? {
150 return Err(HgError::corrupted(format!(
150 return Err(HgError::corrupted(format!(
151 ".hg/sharedpath points to nonexistent directory {}",
151 ".hg/sharedpath points to nonexistent directory {}",
152 shared_path.display()
152 shared_path.display()
153 ))
153 ))
154 .into());
154 .into());
155 }
155 }
156
156
157 store_path = shared_path.join("store");
157 store_path = shared_path.join("store");
158
158
159 let source_is_share_safe =
159 let source_is_share_safe =
160 requirements::load(Vfs { base: &shared_path })?
160 requirements::load(Vfs { base: &shared_path })?
161 .contains(requirements::SHARESAFE_REQUIREMENT);
161 .contains(requirements::SHARESAFE_REQUIREMENT);
162
162
163 if share_safe != source_is_share_safe {
163 if share_safe != source_is_share_safe {
164 return Err(HgError::unsupported("share-safe mismatch").into());
164 return Err(HgError::unsupported("share-safe mismatch").into());
165 }
165 }
166
166
167 if share_safe {
167 if share_safe {
168 repo_config_files.insert(0, shared_path.join("hgrc"))
168 repo_config_files.insert(0, shared_path.join("hgrc"))
169 }
169 }
170 }
170 }
171 if share_safe {
171 if share_safe {
172 reqs.extend(requirements::load(Vfs { base: &store_path })?);
172 reqs.extend(requirements::load(Vfs { base: &store_path })?);
173 }
173 }
174
174
175 let repo_config = if std::env::var_os("HGRCSKIPREPO").is_none() {
175 let repo_config = if std::env::var_os("HGRCSKIPREPO").is_none() {
176 config.combine_with_repo(&repo_config_files)?
176 config.combine_with_repo(&repo_config_files)?
177 } else {
177 } else {
178 config.clone()
178 config.clone()
179 };
179 };
180
180
181 let repo = Self {
181 let repo = Self {
182 requirements: reqs,
182 requirements: reqs,
183 working_directory,
183 working_directory,
184 store: store_path,
184 store: store_path,
185 dot_hg,
185 dot_hg,
186 config: repo_config,
186 config: repo_config,
187 dirstate_parents: LazyCell::new(),
187 dirstate_parents: LazyCell::new(),
188 dirstate_map: LazyCell::new(),
188 dirstate_map: LazyCell::new(),
189 changelog: LazyCell::new(),
189 changelog: LazyCell::new(),
190 manifestlog: LazyCell::new(),
190 manifestlog: LazyCell::new(),
191 };
191 };
192
192
193 requirements::check(&repo)?;
193 requirements::check(&repo)?;
194
194
195 Ok(repo)
195 Ok(repo)
196 }
196 }
197
197
198 pub fn working_directory_path(&self) -> &Path {
198 pub fn working_directory_path(&self) -> &Path {
199 &self.working_directory
199 &self.working_directory
200 }
200 }
201
201
202 pub fn requirements(&self) -> &HashSet<String> {
202 pub fn requirements(&self) -> &HashSet<String> {
203 &self.requirements
203 &self.requirements
204 }
204 }
205
205
206 pub fn config(&self) -> &Config {
206 pub fn config(&self) -> &Config {
207 &self.config
207 &self.config
208 }
208 }
209
209
210 /// For accessing repository files (in `.hg`), except for the store
210 /// For accessing repository files (in `.hg`), except for the store
211 /// (`.hg/store`).
211 /// (`.hg/store`).
212 pub fn hg_vfs(&self) -> Vfs<'_> {
212 pub fn hg_vfs(&self) -> Vfs<'_> {
213 Vfs { base: &self.dot_hg }
213 Vfs { base: &self.dot_hg }
214 }
214 }
215
215
216 /// For accessing repository store files (in `.hg/store`)
216 /// For accessing repository store files (in `.hg/store`)
217 pub fn store_vfs(&self) -> Vfs<'_> {
217 pub fn store_vfs(&self) -> Vfs<'_> {
218 Vfs { base: &self.store }
218 Vfs { base: &self.store }
219 }
219 }
220
220
221 /// For accessing the working copy
221 /// For accessing the working copy
222 pub fn working_directory_vfs(&self) -> Vfs<'_> {
222 pub fn working_directory_vfs(&self) -> Vfs<'_> {
223 Vfs {
223 Vfs {
224 base: &self.working_directory,
224 base: &self.working_directory,
225 }
225 }
226 }
226 }
227
227
228 pub fn try_with_wlock_no_wait<R>(
228 pub fn try_with_wlock_no_wait<R>(
229 &self,
229 &self,
230 f: impl FnOnce() -> R,
230 f: impl FnOnce() -> R,
231 ) -> Result<R, LockError> {
231 ) -> Result<R, LockError> {
232 try_with_lock_no_wait(self.hg_vfs(), "wlock", f)
232 try_with_lock_no_wait(self.hg_vfs(), "wlock", f)
233 }
233 }
234
234
235 pub fn has_dirstate_v2(&self) -> bool {
235 /// Whether this repo should use dirstate-v2.
236 /// The presence of `dirstate-v2` in the requirements does not mean that
237 /// the on-disk dirstate is necessarily in version 2. In most cases,
238 /// a dirstate-v2 file will indeed be found, but in rare cases (like the
239 /// upgrade mechanism being cut short), the on-disk version will be a
240 /// v1 file.
241 /// Semantically, having a requirement only means that a client should be
242 /// able to understand the repo *if* it uses the requirement, but not that
243 /// the requirement is actually used.
244 pub fn use_dirstate_v2(&self) -> bool {
236 self.requirements
245 self.requirements
237 .contains(requirements::DIRSTATE_V2_REQUIREMENT)
246 .contains(requirements::DIRSTATE_V2_REQUIREMENT)
238 }
247 }
239
248
240 pub fn has_sparse(&self) -> bool {
249 pub fn has_sparse(&self) -> bool {
241 self.requirements.contains(requirements::SPARSE_REQUIREMENT)
250 self.requirements.contains(requirements::SPARSE_REQUIREMENT)
242 }
251 }
243
252
244 pub fn has_narrow(&self) -> bool {
253 pub fn has_narrow(&self) -> bool {
245 self.requirements.contains(requirements::NARROW_REQUIREMENT)
254 self.requirements.contains(requirements::NARROW_REQUIREMENT)
246 }
255 }
247
256
248 pub fn has_nodemap(&self) -> bool {
257 pub fn has_nodemap(&self) -> bool {
249 self.requirements
258 self.requirements
250 .contains(requirements::NODEMAP_REQUIREMENT)
259 .contains(requirements::NODEMAP_REQUIREMENT)
251 }
260 }
252
261
253 fn dirstate_file_contents(&self) -> Result<Vec<u8>, HgError> {
262 fn dirstate_file_contents(&self) -> Result<Vec<u8>, HgError> {
254 Ok(self
263 Ok(self
255 .hg_vfs()
264 .hg_vfs()
256 .read("dirstate")
265 .read("dirstate")
257 .io_not_found_as_none()?
266 .io_not_found_as_none()?
258 .unwrap_or_default())
267 .unwrap_or_default())
259 }
268 }
260
269
261 fn dirstate_identity(&self) -> Result<Option<u64>, HgError> {
270 fn dirstate_identity(&self) -> Result<Option<u64>, HgError> {
262 use std::os::unix::fs::MetadataExt;
271 use std::os::unix::fs::MetadataExt;
263 Ok(self
272 Ok(self
264 .hg_vfs()
273 .hg_vfs()
265 .symlink_metadata("dirstate")
274 .symlink_metadata("dirstate")
266 .io_not_found_as_none()?
275 .io_not_found_as_none()?
267 .map(|meta| meta.ino()))
276 .map(|meta| meta.ino()))
268 }
277 }
269
278
270 pub fn dirstate_parents(&self) -> Result<DirstateParents, HgError> {
279 pub fn dirstate_parents(&self) -> Result<DirstateParents, HgError> {
271 Ok(*self
280 Ok(*self
272 .dirstate_parents
281 .dirstate_parents
273 .get_or_init(|| self.read_dirstate_parents())?)
282 .get_or_init(|| self.read_dirstate_parents())?)
274 }
283 }
275
284
276 fn read_dirstate_parents(&self) -> Result<DirstateParents, HgError> {
285 fn read_dirstate_parents(&self) -> Result<DirstateParents, HgError> {
277 let dirstate = self.dirstate_file_contents()?;
286 let dirstate = self.dirstate_file_contents()?;
278 let parents = if dirstate.is_empty() {
287 let parents = if dirstate.is_empty() {
279 DirstateParents::NULL
288 DirstateParents::NULL
280 } else if self.has_dirstate_v2() {
289 } else if self.use_dirstate_v2() {
281 let docket =
290 let docket =
282 crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
291 crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
283 docket.parents()
292 docket.parents()
284 } else {
293 } else {
285 *crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?
294 *crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?
286 };
295 };
287 self.dirstate_parents.set(parents);
296 self.dirstate_parents.set(parents);
288 Ok(parents)
297 Ok(parents)
289 }
298 }
290
299
291 /// Returns the information read from the dirstate docket necessary to
300 /// Returns the information read from the dirstate docket necessary to
292 /// check if the data file has been updated/deleted by another process
301 /// check if the data file has been updated/deleted by another process
293 /// since we last read the dirstate.
302 /// since we last read the dirstate.
294 /// Namely, the inode, data file uuid and the data size.
303 /// Namely, the inode, data file uuid and the data size.
295 fn get_dirstate_data_file_integrity(
304 fn get_dirstate_data_file_integrity(
296 &self,
305 &self,
297 ) -> Result<DirstateMapIdentity, HgError> {
306 ) -> Result<DirstateMapIdentity, HgError> {
298 assert!(
307 assert!(
299 self.has_dirstate_v2(),
308 self.use_dirstate_v2(),
300 "accessing dirstate data file ID without dirstate-v2"
309 "accessing dirstate data file ID without dirstate-v2"
301 );
310 );
302 // Get the identity before the contents since we could have a race
311 // Get the identity before the contents since we could have a race
303 // between the two. Having an identity that is too old is fine, but
312 // between the two. Having an identity that is too old is fine, but
304 // one that is younger than the content change is bad.
313 // one that is younger than the content change is bad.
305 let identity = self.dirstate_identity()?;
314 let identity = self.dirstate_identity()?;
306 let dirstate = self.dirstate_file_contents()?;
315 let dirstate = self.dirstate_file_contents()?;
307 if dirstate.is_empty() {
316 if dirstate.is_empty() {
308 self.dirstate_parents.set(DirstateParents::NULL);
317 self.dirstate_parents.set(DirstateParents::NULL);
309 Ok((identity, None, 0))
318 Ok((identity, None, 0))
310 } else {
319 } else {
311 let docket =
320 let docket =
312 crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
321 crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
313 self.dirstate_parents.set(docket.parents());
322 self.dirstate_parents.set(docket.parents());
314 Ok((identity, Some(docket.uuid.to_owned()), docket.data_size()))
323 Ok((identity, Some(docket.uuid.to_owned()), docket.data_size()))
315 }
324 }
316 }
325 }
317
326
318 fn new_dirstate_map(&self) -> Result<OwningDirstateMap, DirstateError> {
327 fn new_dirstate_map(&self) -> Result<OwningDirstateMap, DirstateError> {
319 if self.has_dirstate_v2() {
328 if self.use_dirstate_v2() {
320 // The v2 dirstate is split into a docket and a data file.
329 // The v2 dirstate is split into a docket and a data file.
321 // Since we don't always take the `wlock` to read it
330 // Since we don't always take the `wlock` to read it
322 // (like in `hg status`), it is susceptible to races.
331 // (like in `hg status`), it is susceptible to races.
323 // A simple retry method should be enough since full rewrites
332 // A simple retry method should be enough since full rewrites
324 // only happen when too much garbage data is present and
333 // only happen when too much garbage data is present and
325 // this race is unlikely.
334 // this race is unlikely.
326 let mut tries = 0;
335 let mut tries = 0;
327
336
328 while tries < V2_MAX_READ_ATTEMPTS {
337 while tries < V2_MAX_READ_ATTEMPTS {
329 tries += 1;
338 tries += 1;
330 match self.read_docket_and_data_file() {
339 match self.read_docket_and_data_file() {
331 Ok(m) => {
340 Ok(m) => {
332 return Ok(m);
341 return Ok(m);
333 }
342 }
334 Err(e) => match e {
343 Err(e) => match e {
335 DirstateError::Common(HgError::RaceDetected(
344 DirstateError::Common(HgError::RaceDetected(
336 context,
345 context,
337 )) => {
346 )) => {
338 log::info!(
347 log::info!(
339 "dirstate read race detected {} (retry {}/{})",
348 "dirstate read race detected {} (retry {}/{})",
340 context,
349 context,
341 tries,
350 tries,
342 V2_MAX_READ_ATTEMPTS,
351 V2_MAX_READ_ATTEMPTS,
343 );
352 );
344 continue;
353 continue;
345 }
354 }
346 _ => return Err(e),
355 _ => return Err(e),
347 },
356 },
348 }
357 }
349 }
358 }
350 let error = HgError::abort(
359 let error = HgError::abort(
351 format!("dirstate read race happened {tries} times in a row"),
360 format!("dirstate read race happened {tries} times in a row"),
352 255,
361 255,
353 None,
362 None,
354 );
363 );
355 Err(DirstateError::Common(error))
364 Err(DirstateError::Common(error))
356 } else {
365 } else {
357 debug_wait_for_file_or_print(
366 debug_wait_for_file_or_print(
358 self.config(),
367 self.config(),
359 "dirstate.pre-read-file",
368 "dirstate.pre-read-file",
360 );
369 );
361 let identity = self.dirstate_identity()?;
370 let identity = self.dirstate_identity()?;
362 let dirstate_file_contents = self.dirstate_file_contents()?;
371 let dirstate_file_contents = self.dirstate_file_contents()?;
363 if dirstate_file_contents.is_empty() {
372 if dirstate_file_contents.is_empty() {
364 self.dirstate_parents.set(DirstateParents::NULL);
373 self.dirstate_parents.set(DirstateParents::NULL);
365 Ok(OwningDirstateMap::new_empty(Vec::new()))
374 Ok(OwningDirstateMap::new_empty(Vec::new()))
366 } else {
375 } else {
367 let (map, parents) = OwningDirstateMap::new_v1(
376 let (map, parents) = OwningDirstateMap::new_v1(
368 dirstate_file_contents,
377 dirstate_file_contents,
369 identity,
378 identity,
370 )?;
379 )?;
371 self.dirstate_parents.set(parents);
380 self.dirstate_parents.set(parents);
372 Ok(map)
381 Ok(map)
373 }
382 }
374 }
383 }
375 }
384 }
376
385
377 fn read_docket_and_data_file(
386 fn read_docket_and_data_file(
378 &self,
387 &self,
379 ) -> Result<OwningDirstateMap, DirstateError> {
388 ) -> Result<OwningDirstateMap, DirstateError> {
380 debug_wait_for_file_or_print(self.config(), "dirstate.pre-read-file");
389 debug_wait_for_file_or_print(self.config(), "dirstate.pre-read-file");
381 let dirstate_file_contents = self.dirstate_file_contents()?;
390 let dirstate_file_contents = self.dirstate_file_contents()?;
382 let identity = self.dirstate_identity()?;
391 let identity = self.dirstate_identity()?;
383 if dirstate_file_contents.is_empty() {
392 if dirstate_file_contents.is_empty() {
384 self.dirstate_parents.set(DirstateParents::NULL);
393 self.dirstate_parents.set(DirstateParents::NULL);
385 return Ok(OwningDirstateMap::new_empty(Vec::new()));
394 return Ok(OwningDirstateMap::new_empty(Vec::new()));
386 }
395 }
387 let docket = crate::dirstate_tree::on_disk::read_docket(
396 let docket = crate::dirstate_tree::on_disk::read_docket(
388 &dirstate_file_contents,
397 &dirstate_file_contents,
389 )?;
398 )?;
390 debug_wait_for_file_or_print(
399 debug_wait_for_file_or_print(
391 self.config(),
400 self.config(),
392 "dirstate.post-docket-read-file",
401 "dirstate.post-docket-read-file",
393 );
402 );
394 self.dirstate_parents.set(docket.parents());
403 self.dirstate_parents.set(docket.parents());
395 let uuid = docket.uuid.to_owned();
404 let uuid = docket.uuid.to_owned();
396 let data_size = docket.data_size();
405 let data_size = docket.data_size();
397
406
398 let context = "between reading dirstate docket and data file";
407 let context = "between reading dirstate docket and data file";
399 let race_error = HgError::RaceDetected(context.into());
408 let race_error = HgError::RaceDetected(context.into());
400 let metadata = docket.tree_metadata();
409 let metadata = docket.tree_metadata();
401
410
402 let mut map = if crate::vfs::is_on_nfs_mount(docket.data_filename()) {
411 let mut map = if crate::vfs::is_on_nfs_mount(docket.data_filename()) {
403 // Don't mmap on NFS to prevent `SIGBUS` error on deletion
412 // Don't mmap on NFS to prevent `SIGBUS` error on deletion
404 let contents = self.hg_vfs().read(docket.data_filename());
413 let contents = self.hg_vfs().read(docket.data_filename());
405 let contents = match contents {
414 let contents = match contents {
406 Ok(c) => c,
415 Ok(c) => c,
407 Err(HgError::IoError { error, context }) => {
416 Err(HgError::IoError { error, context }) => {
408 match error.raw_os_error().expect("real os error") {
417 match error.raw_os_error().expect("real os error") {
409 // 2 = ENOENT, No such file or directory
418 // 2 = ENOENT, No such file or directory
410 // 116 = ESTALE, Stale NFS file handle
419 // 116 = ESTALE, Stale NFS file handle
411 //
420 //
412 // TODO match on `error.kind()` when
421 // TODO match on `error.kind()` when
413 // `ErrorKind::StaleNetworkFileHandle` is stable.
422 // `ErrorKind::StaleNetworkFileHandle` is stable.
414 2 | 116 => {
423 2 | 116 => {
415 // Race where the data file was deleted right after
424 // Race where the data file was deleted right after
416 // we read the docket, try again
425 // we read the docket, try again
417 return Err(race_error.into());
426 return Err(race_error.into());
418 }
427 }
419 _ => {
428 _ => {
420 return Err(
429 return Err(
421 HgError::IoError { error, context }.into()
430 HgError::IoError { error, context }.into()
422 )
431 )
423 }
432 }
424 }
433 }
425 }
434 }
426 Err(e) => return Err(e.into()),
435 Err(e) => return Err(e.into()),
427 };
436 };
428 OwningDirstateMap::new_v2(
437 OwningDirstateMap::new_v2(
429 contents, data_size, metadata, uuid, identity,
438 contents, data_size, metadata, uuid, identity,
430 )
439 )
431 } else {
440 } else {
432 match self
441 match self
433 .hg_vfs()
442 .hg_vfs()
434 .mmap_open(docket.data_filename())
443 .mmap_open(docket.data_filename())
435 .io_not_found_as_none()
444 .io_not_found_as_none()
436 {
445 {
437 Ok(Some(data_mmap)) => OwningDirstateMap::new_v2(
446 Ok(Some(data_mmap)) => OwningDirstateMap::new_v2(
438 data_mmap, data_size, metadata, uuid, identity,
447 data_mmap, data_size, metadata, uuid, identity,
439 ),
448 ),
440 Ok(None) => {
449 Ok(None) => {
441 // Race where the data file was deleted right after we
450 // Race where the data file was deleted right after we
442 // read the docket, try again
451 // read the docket, try again
443 return Err(race_error.into());
452 return Err(race_error.into());
444 }
453 }
445 Err(e) => return Err(e.into()),
454 Err(e) => return Err(e.into()),
446 }
455 }
447 }?;
456 }?;
448
457
449 let write_mode_config = self
458 let write_mode_config = self
450 .config()
459 .config()
451 .get_str(b"devel", b"dirstate.v2.data_update_mode")
460 .get_str(b"devel", b"dirstate.v2.data_update_mode")
452 .unwrap_or(Some("auto"))
461 .unwrap_or(Some("auto"))
453 .unwrap_or("auto"); // don't bother for devel options
462 .unwrap_or("auto"); // don't bother for devel options
454 let write_mode = match write_mode_config {
463 let write_mode = match write_mode_config {
455 "auto" => DirstateMapWriteMode::Auto,
464 "auto" => DirstateMapWriteMode::Auto,
456 "force-new" => DirstateMapWriteMode::ForceNewDataFile,
465 "force-new" => DirstateMapWriteMode::ForceNewDataFile,
457 "force-append" => DirstateMapWriteMode::ForceAppend,
466 "force-append" => DirstateMapWriteMode::ForceAppend,
458 _ => DirstateMapWriteMode::Auto,
467 _ => DirstateMapWriteMode::Auto,
459 };
468 };
460
469
461 map.with_dmap_mut(|m| m.set_write_mode(write_mode));
470 map.with_dmap_mut(|m| m.set_write_mode(write_mode));
462
471
463 Ok(map)
472 Ok(map)
464 }
473 }
465
474
466 pub fn dirstate_map(
475 pub fn dirstate_map(
467 &self,
476 &self,
468 ) -> Result<Ref<OwningDirstateMap>, DirstateError> {
477 ) -> Result<Ref<OwningDirstateMap>, DirstateError> {
469 self.dirstate_map.get_or_init(|| self.new_dirstate_map())
478 self.dirstate_map.get_or_init(|| self.new_dirstate_map())
470 }
479 }
471
480
472 pub fn dirstate_map_mut(
481 pub fn dirstate_map_mut(
473 &self,
482 &self,
474 ) -> Result<RefMut<OwningDirstateMap>, DirstateError> {
483 ) -> Result<RefMut<OwningDirstateMap>, DirstateError> {
475 self.dirstate_map
484 self.dirstate_map
476 .get_mut_or_init(|| self.new_dirstate_map())
485 .get_mut_or_init(|| self.new_dirstate_map())
477 }
486 }
478
487
479 fn new_changelog(&self) -> Result<Changelog, HgError> {
488 fn new_changelog(&self) -> Result<Changelog, HgError> {
480 Changelog::open(&self.store_vfs(), self.has_nodemap())
489 Changelog::open(&self.store_vfs(), self.has_nodemap())
481 }
490 }
482
491
483 pub fn changelog(&self) -> Result<Ref<Changelog>, HgError> {
492 pub fn changelog(&self) -> Result<Ref<Changelog>, HgError> {
484 self.changelog.get_or_init(|| self.new_changelog())
493 self.changelog.get_or_init(|| self.new_changelog())
485 }
494 }
486
495
487 pub fn changelog_mut(&self) -> Result<RefMut<Changelog>, HgError> {
496 pub fn changelog_mut(&self) -> Result<RefMut<Changelog>, HgError> {
488 self.changelog.get_mut_or_init(|| self.new_changelog())
497 self.changelog.get_mut_or_init(|| self.new_changelog())
489 }
498 }
490
499
491 fn new_manifestlog(&self) -> Result<Manifestlog, HgError> {
500 fn new_manifestlog(&self) -> Result<Manifestlog, HgError> {
492 Manifestlog::open(&self.store_vfs(), self.has_nodemap())
501 Manifestlog::open(&self.store_vfs(), self.has_nodemap())
493 }
502 }
494
503
495 pub fn manifestlog(&self) -> Result<Ref<Manifestlog>, HgError> {
504 pub fn manifestlog(&self) -> Result<Ref<Manifestlog>, HgError> {
496 self.manifestlog.get_or_init(|| self.new_manifestlog())
505 self.manifestlog.get_or_init(|| self.new_manifestlog())
497 }
506 }
498
507
499 pub fn manifestlog_mut(&self) -> Result<RefMut<Manifestlog>, HgError> {
508 pub fn manifestlog_mut(&self) -> Result<RefMut<Manifestlog>, HgError> {
500 self.manifestlog.get_mut_or_init(|| self.new_manifestlog())
509 self.manifestlog.get_mut_or_init(|| self.new_manifestlog())
501 }
510 }
502
511
503 /// Returns the manifest of the *changeset* with the given node ID
512 /// Returns the manifest of the *changeset* with the given node ID
504 pub fn manifest_for_node(
513 pub fn manifest_for_node(
505 &self,
514 &self,
506 node: impl Into<NodePrefix>,
515 node: impl Into<NodePrefix>,
507 ) -> Result<Manifest, RevlogError> {
516 ) -> Result<Manifest, RevlogError> {
508 self.manifestlog()?.data_for_node(
517 self.manifestlog()?.data_for_node(
509 self.changelog()?
518 self.changelog()?
510 .data_for_node(node.into())?
519 .data_for_node(node.into())?
511 .manifest_node()?
520 .manifest_node()?
512 .into(),
521 .into(),
513 )
522 )
514 }
523 }
515
524
516 /// Returns the manifest of the *changeset* with the given revision number
525 /// Returns the manifest of the *changeset* with the given revision number
517 pub fn manifest_for_rev(
526 pub fn manifest_for_rev(
518 &self,
527 &self,
519 revision: Revision,
528 revision: Revision,
520 ) -> Result<Manifest, RevlogError> {
529 ) -> Result<Manifest, RevlogError> {
521 self.manifestlog()?.data_for_node(
530 self.manifestlog()?.data_for_node(
522 self.changelog()?
531 self.changelog()?
523 .data_for_rev(revision)?
532 .data_for_rev(revision)?
524 .manifest_node()?
533 .manifest_node()?
525 .into(),
534 .into(),
526 )
535 )
527 }
536 }
528
537
529 pub fn has_subrepos(&self) -> Result<bool, DirstateError> {
538 pub fn has_subrepos(&self) -> Result<bool, DirstateError> {
530 if let Some(entry) = self.dirstate_map()?.get(HgPath::new(".hgsub"))? {
539 if let Some(entry) = self.dirstate_map()?.get(HgPath::new(".hgsub"))? {
531 Ok(entry.tracked())
540 Ok(entry.tracked())
532 } else {
541 } else {
533 Ok(false)
542 Ok(false)
534 }
543 }
535 }
544 }
536
545
537 pub fn filelog(&self, path: &HgPath) -> Result<Filelog, HgError> {
546 pub fn filelog(&self, path: &HgPath) -> Result<Filelog, HgError> {
538 Filelog::open(self, path)
547 Filelog::open(self, path)
539 }
548 }
540
549
541 /// Write to disk any updates that were made through `dirstate_map_mut`.
550 /// Write to disk any updates that were made through `dirstate_map_mut`.
542 ///
551 ///
543 /// The "wlock" must be held while calling this.
552 /// The "wlock" must be held while calling this.
544 /// See for example `try_with_wlock_no_wait`.
553 /// See for example `try_with_wlock_no_wait`.
545 ///
554 ///
546 /// TODO: have a `WritableRepo` type only accessible while holding the
555 /// TODO: have a `WritableRepo` type only accessible while holding the
547 /// lock?
556 /// lock?
548 pub fn write_dirstate(&self) -> Result<(), DirstateError> {
557 pub fn write_dirstate(&self) -> Result<(), DirstateError> {
549 let map = self.dirstate_map()?;
558 let map = self.dirstate_map()?;
550 // TODO: Maintain a `DirstateMap::dirty` flag, and return early here if
559 // TODO: Maintain a `DirstateMap::dirty` flag, and return early here if
551 // it’s unset
560 // it’s unset
552 let parents = self.dirstate_parents()?;
561 let parents = self.dirstate_parents()?;
553 let (packed_dirstate, old_uuid_to_remove) = if self.has_dirstate_v2() {
562 let (packed_dirstate, old_uuid_to_remove) = if self.use_dirstate_v2() {
554 let (identity, uuid, data_size) =
563 let (identity, uuid, data_size) =
555 self.get_dirstate_data_file_integrity()?;
564 self.get_dirstate_data_file_integrity()?;
556 let identity_changed = identity != map.old_identity();
565 let identity_changed = identity != map.old_identity();
557 let uuid_changed = uuid.as_deref() != map.old_uuid();
566 let uuid_changed = uuid.as_deref() != map.old_uuid();
558 let data_length_changed = data_size != map.old_data_size();
567 let data_length_changed = data_size != map.old_data_size();
559
568
560 if identity_changed || uuid_changed || data_length_changed {
569 if identity_changed || uuid_changed || data_length_changed {
561 // If any of identity, uuid or length have changed since
570 // If any of identity, uuid or length have changed since
562 // last disk read, don't write.
571 // last disk read, don't write.
563 // This is fine because either we're in a command that doesn't
572 // This is fine because either we're in a command that doesn't
564 // write anything too important (like `hg status`), or we're in
573 // write anything too important (like `hg status`), or we're in
565 // `hg add` and we're supposed to have taken the lock before
574 // `hg add` and we're supposed to have taken the lock before
566 // reading anyway.
575 // reading anyway.
567 //
576 //
568 // TODO complain loudly if we've changed anything important
577 // TODO complain loudly if we've changed anything important
569 // without taking the lock.
578 // without taking the lock.
570 // (see `hg help config.format.use-dirstate-tracked-hint`)
579 // (see `hg help config.format.use-dirstate-tracked-hint`)
571 log::debug!(
580 log::debug!(
572 "dirstate has changed since last read, not updating."
581 "dirstate has changed since last read, not updating."
573 );
582 );
574 return Ok(());
583 return Ok(());
575 }
584 }
576
585
577 let uuid_opt = map.old_uuid();
586 let uuid_opt = map.old_uuid();
578 let write_mode = if uuid_opt.is_some() {
587 let write_mode = if uuid_opt.is_some() {
579 DirstateMapWriteMode::Auto
588 DirstateMapWriteMode::Auto
580 } else {
589 } else {
581 DirstateMapWriteMode::ForceNewDataFile
590 DirstateMapWriteMode::ForceNewDataFile
582 };
591 };
583 let (data, tree_metadata, append, old_data_size) =
592 let (data, tree_metadata, append, old_data_size) =
584 map.pack_v2(write_mode)?;
593 map.pack_v2(write_mode)?;
585
594
586 // Reuse the uuid, or generate a new one, keeping the old for
595 // Reuse the uuid, or generate a new one, keeping the old for
587 // deletion.
596 // deletion.
588 let (uuid, old_uuid) = match uuid_opt {
597 let (uuid, old_uuid) = match uuid_opt {
589 Some(uuid) => {
598 Some(uuid) => {
590 let as_str = std::str::from_utf8(uuid)
599 let as_str = std::str::from_utf8(uuid)
591 .map_err(|_| {
600 .map_err(|_| {
592 HgError::corrupted(
601 HgError::corrupted(
593 "non-UTF-8 dirstate data file ID",
602 "non-UTF-8 dirstate data file ID",
594 )
603 )
595 })?
604 })?
596 .to_owned();
605 .to_owned();
597 if append {
606 if append {
598 (as_str, None)
607 (as_str, None)
599 } else {
608 } else {
600 (DirstateDocket::new_uid(), Some(as_str))
609 (DirstateDocket::new_uid(), Some(as_str))
601 }
610 }
602 }
611 }
603 None => (DirstateDocket::new_uid(), None),
612 None => (DirstateDocket::new_uid(), None),
604 };
613 };
605
614
606 let data_filename = format!("dirstate.{}", uuid);
615 let data_filename = format!("dirstate.{}", uuid);
607 let data_filename = self.hg_vfs().join(data_filename);
616 let data_filename = self.hg_vfs().join(data_filename);
608 let mut options = std::fs::OpenOptions::new();
617 let mut options = std::fs::OpenOptions::new();
609 options.write(true);
618 options.write(true);
610
619
611 // Why are we not using the O_APPEND flag when appending?
620 // Why are we not using the O_APPEND flag when appending?
612 //
621 //
613 // - O_APPEND makes it trickier to deal with garbage at the end of
622 // - O_APPEND makes it trickier to deal with garbage at the end of
614 // the file, left by a previous uncommitted transaction. By
623 // the file, left by a previous uncommitted transaction. By
615 // starting the write at [old_data_size] we make sure we erase
624 // starting the write at [old_data_size] we make sure we erase
616 // all such garbage.
625 // all such garbage.
617 //
626 //
618 // - O_APPEND requires to special-case 0-byte writes, whereas we
627 // - O_APPEND requires to special-case 0-byte writes, whereas we
619 // don't need that.
628 // don't need that.
620 //
629 //
621 // - Some OSes have bugs in implementation O_APPEND:
630 // - Some OSes have bugs in implementation O_APPEND:
622 // revlog.py talks about a Solaris bug, but we also saw some ZFS
631 // revlog.py talks about a Solaris bug, but we also saw some ZFS
623 // bug: https://github.com/openzfs/zfs/pull/3124,
632 // bug: https://github.com/openzfs/zfs/pull/3124,
624 // https://github.com/openzfs/zfs/issues/13370
633 // https://github.com/openzfs/zfs/issues/13370
625 //
634 //
626 if !append {
635 if !append {
627 log::trace!("creating a new dirstate data file");
636 log::trace!("creating a new dirstate data file");
628 options.create_new(true);
637 options.create_new(true);
629 } else {
638 } else {
630 log::trace!("appending to the dirstate data file");
639 log::trace!("appending to the dirstate data file");
631 }
640 }
632
641
633 let data_size = (|| {
642 let data_size = (|| {
634 // TODO: loop and try another random ID if !append and this
643 // TODO: loop and try another random ID if !append and this
635 // returns `ErrorKind::AlreadyExists`? Collision chance of two
644 // returns `ErrorKind::AlreadyExists`? Collision chance of two
636 // random IDs is one in 2**32
645 // random IDs is one in 2**32
637 let mut file = options.open(&data_filename)?;
646 let mut file = options.open(&data_filename)?;
638 if append {
647 if append {
639 file.seek(SeekFrom::Start(old_data_size as u64))?;
648 file.seek(SeekFrom::Start(old_data_size as u64))?;
640 }
649 }
641 file.write_all(&data)?;
650 file.write_all(&data)?;
642 file.flush()?;
651 file.flush()?;
643 file.seek(SeekFrom::Current(0))
652 file.seek(SeekFrom::Current(0))
644 })()
653 })()
645 .when_writing_file(&data_filename)?;
654 .when_writing_file(&data_filename)?;
646
655
647 let packed_dirstate = DirstateDocket::serialize(
656 let packed_dirstate = DirstateDocket::serialize(
648 parents,
657 parents,
649 tree_metadata,
658 tree_metadata,
650 data_size,
659 data_size,
651 uuid.as_bytes(),
660 uuid.as_bytes(),
652 )
661 )
653 .map_err(|_: std::num::TryFromIntError| {
662 .map_err(|_: std::num::TryFromIntError| {
654 HgError::corrupted("overflow in dirstate docket serialization")
663 HgError::corrupted("overflow in dirstate docket serialization")
655 })?;
664 })?;
656
665
657 (packed_dirstate, old_uuid)
666 (packed_dirstate, old_uuid)
658 } else {
667 } else {
659 let identity = self.dirstate_identity()?;
668 let identity = self.dirstate_identity()?;
660 if identity != map.old_identity() {
669 if identity != map.old_identity() {
661 // If identity changed since last disk read, don't write.
670 // If identity changed since last disk read, don't write.
662 // This is fine because either we're in a command that doesn't
671 // This is fine because either we're in a command that doesn't
663 // write anything too important (like `hg status`), or we're in
672 // write anything too important (like `hg status`), or we're in
664 // `hg add` and we're supposed to have taken the lock before
673 // `hg add` and we're supposed to have taken the lock before
665 // reading anyway.
674 // reading anyway.
666 //
675 //
667 // TODO complain loudly if we've changed anything important
676 // TODO complain loudly if we've changed anything important
668 // without taking the lock.
677 // without taking the lock.
669 // (see `hg help config.format.use-dirstate-tracked-hint`)
678 // (see `hg help config.format.use-dirstate-tracked-hint`)
670 log::debug!(
679 log::debug!(
671 "dirstate has changed since last read, not updating."
680 "dirstate has changed since last read, not updating."
672 );
681 );
673 return Ok(());
682 return Ok(());
674 }
683 }
675 (map.pack_v1(parents)?, None)
684 (map.pack_v1(parents)?, None)
676 };
685 };
677
686
678 let vfs = self.hg_vfs();
687 let vfs = self.hg_vfs();
679 vfs.atomic_write("dirstate", &packed_dirstate)?;
688 vfs.atomic_write("dirstate", &packed_dirstate)?;
680 if let Some(uuid) = old_uuid_to_remove {
689 if let Some(uuid) = old_uuid_to_remove {
681 // Remove the old data file after the new docket pointing to the
690 // Remove the old data file after the new docket pointing to the
682 // new data file was written.
691 // new data file was written.
683 vfs.remove_file(format!("dirstate.{}", uuid))?;
692 vfs.remove_file(format!("dirstate.{}", uuid))?;
684 }
693 }
685 Ok(())
694 Ok(())
686 }
695 }
687 }
696 }
688
697
689 /// Lazily-initialized component of `Repo` with interior mutability
698 /// Lazily-initialized component of `Repo` with interior mutability
690 ///
699 ///
691 /// This differs from `OnceCell` in that the value can still be "deinitialized"
700 /// This differs from `OnceCell` in that the value can still be "deinitialized"
692 /// later by setting its inner `Option` to `None`. It also takes the
701 /// later by setting its inner `Option` to `None`. It also takes the
693 /// initialization function as an argument when the value is requested, not
702 /// initialization function as an argument when the value is requested, not
694 /// when the instance is created.
703 /// when the instance is created.
695 struct LazyCell<T> {
704 struct LazyCell<T> {
696 value: RefCell<Option<T>>,
705 value: RefCell<Option<T>>,
697 }
706 }
698
707
699 impl<T> LazyCell<T> {
708 impl<T> LazyCell<T> {
700 fn new() -> Self {
709 fn new() -> Self {
701 Self {
710 Self {
702 value: RefCell::new(None),
711 value: RefCell::new(None),
703 }
712 }
704 }
713 }
705
714
706 fn set(&self, value: T) {
715 fn set(&self, value: T) {
707 *self.value.borrow_mut() = Some(value)
716 *self.value.borrow_mut() = Some(value)
708 }
717 }
709
718
710 fn get_or_init<E>(
719 fn get_or_init<E>(
711 &self,
720 &self,
712 init: impl Fn() -> Result<T, E>,
721 init: impl Fn() -> Result<T, E>,
713 ) -> Result<Ref<T>, E> {
722 ) -> Result<Ref<T>, E> {
714 let mut borrowed = self.value.borrow();
723 let mut borrowed = self.value.borrow();
715 if borrowed.is_none() {
724 if borrowed.is_none() {
716 drop(borrowed);
725 drop(borrowed);
717 // Only use `borrow_mut` if it is really needed to avoid panic in
726 // Only use `borrow_mut` if it is really needed to avoid panic in
718 // case there is another outstanding borrow but mutation is not
727 // case there is another outstanding borrow but mutation is not
719 // needed.
728 // needed.
720 *self.value.borrow_mut() = Some(init()?);
729 *self.value.borrow_mut() = Some(init()?);
721 borrowed = self.value.borrow()
730 borrowed = self.value.borrow()
722 }
731 }
723 Ok(Ref::map(borrowed, |option| option.as_ref().unwrap()))
732 Ok(Ref::map(borrowed, |option| option.as_ref().unwrap()))
724 }
733 }
725
734
726 fn get_mut_or_init<E>(
735 fn get_mut_or_init<E>(
727 &self,
736 &self,
728 init: impl Fn() -> Result<T, E>,
737 init: impl Fn() -> Result<T, E>,
729 ) -> Result<RefMut<T>, E> {
738 ) -> Result<RefMut<T>, E> {
730 let mut borrowed = self.value.borrow_mut();
739 let mut borrowed = self.value.borrow_mut();
731 if borrowed.is_none() {
740 if borrowed.is_none() {
732 *borrowed = Some(init()?);
741 *borrowed = Some(init()?);
733 }
742 }
734 Ok(RefMut::map(borrowed, |option| option.as_mut().unwrap()))
743 Ok(RefMut::map(borrowed, |option| option.as_mut().unwrap()))
735 }
744 }
736 }
745 }
General Comments 0
You need to be logged in to leave comments. Login now