##// END OF EJS Templates
rust-repo: move dirstate-v2 opening to a separate method...
Raphaël Gomès -
r51122:cbd4c923 stable
parent child Browse files
Show More
@@ -1,599 +1,609 b''
1 1 use crate::changelog::Changelog;
2 2 use crate::config::{Config, ConfigError, ConfigParseError};
3 3 use crate::dirstate::DirstateParents;
4 4 use crate::dirstate_tree::dirstate_map::DirstateMapWriteMode;
5 5 use crate::dirstate_tree::on_disk::Docket as DirstateDocket;
6 6 use crate::dirstate_tree::owning::OwningDirstateMap;
7 7 use crate::errors::HgResultExt;
8 8 use crate::errors::{HgError, IoResultExt};
9 9 use crate::lock::{try_with_lock_no_wait, LockError};
10 10 use crate::manifest::{Manifest, Manifestlog};
11 11 use crate::revlog::filelog::Filelog;
12 12 use crate::revlog::revlog::RevlogError;
13 13 use crate::utils::files::get_path_from_bytes;
14 14 use crate::utils::hg_path::HgPath;
15 15 use crate::utils::SliceExt;
16 16 use crate::vfs::{is_dir, is_file, Vfs};
17 17 use crate::{requirements, NodePrefix};
18 18 use crate::{DirstateError, Revision};
19 19 use std::cell::{Ref, RefCell, RefMut};
20 20 use std::collections::HashSet;
21 21 use std::io::Seek;
22 22 use std::io::SeekFrom;
23 23 use std::io::Write as IoWrite;
24 24 use std::path::{Path, PathBuf};
25 25
/// A repository on disk
pub struct Repo {
    /// Root of the checked-out working copy.
    working_directory: PathBuf,
    /// Path to the `.hg` directory holding repository metadata.
    dot_hg: PathBuf,
    /// Path to the store; `.hg/store`, or the share source's store for
    /// shared repositories.
    store: PathBuf,
    /// Requirements from `.hg/requires` (plus the store's when share-safe).
    requirements: HashSet<String>,
    /// Combined configuration (caller-provided config plus repo `hgrc`s).
    config: Config,
    /// Cached working-directory parents, lazily read from the dirstate.
    dirstate_parents: LazyCell<DirstateParents>,
    /// Cached dirstate-v2 data file UUID (`None` when there is none).
    dirstate_data_file_uuid: LazyCell<Option<Vec<u8>>>,
    /// Lazily-loaded dirstate map.
    dirstate_map: LazyCell<OwningDirstateMap>,
    /// Lazily-opened changelog.
    changelog: LazyCell<Changelog>,
    /// Lazily-opened manifest log.
    manifestlog: LazyCell<Manifestlog>,
}
39 39
/// Errors that can occur while locating or opening a [`Repo`].
#[derive(Debug, derive_more::From)]
pub enum RepoError {
    /// No repository was found at (or above) the given path.
    NotFound {
        at: PathBuf,
    },
    /// A configuration file failed to parse.
    #[from]
    ConfigParseError(ConfigParseError),
    /// Any other error.
    #[from]
    Other(HgError),
}
50 50
51 51 impl From<ConfigError> for RepoError {
52 52 fn from(error: ConfigError) -> Self {
53 53 match error {
54 54 ConfigError::Parse(error) => error.into(),
55 55 ConfigError::Other(error) => error.into(),
56 56 }
57 57 }
58 58 }
59 59
60 60 impl Repo {
61 61 /// tries to find nearest repository root in current working directory or
62 62 /// its ancestors
63 63 pub fn find_repo_root() -> Result<PathBuf, RepoError> {
64 64 let current_directory = crate::utils::current_dir()?;
65 65 // ancestors() is inclusive: it first yields `current_directory`
66 66 // as-is.
67 67 for ancestor in current_directory.ancestors() {
68 68 if is_dir(ancestor.join(".hg"))? {
69 69 return Ok(ancestor.to_path_buf());
70 70 }
71 71 }
72 72 return Err(RepoError::NotFound {
73 73 at: current_directory,
74 74 });
75 75 }
76 76
77 77 /// Find a repository, either at the given path (which must contain a `.hg`
78 78 /// sub-directory) or by searching the current directory and its
79 79 /// ancestors.
80 80 ///
81 81 /// A method with two very different "modes" like this usually a code smell
82 82 /// to make two methods instead, but in this case an `Option` is what rhg
83 83 /// sub-commands get from Clap for the `-R` / `--repository` CLI argument.
84 84 /// Having two methods would just move that `if` to almost all callers.
85 85 pub fn find(
86 86 config: &Config,
87 87 explicit_path: Option<PathBuf>,
88 88 ) -> Result<Self, RepoError> {
89 89 if let Some(root) = explicit_path {
90 90 if is_dir(root.join(".hg"))? {
91 91 Self::new_at_path(root.to_owned(), config)
92 92 } else if is_file(&root)? {
93 93 Err(HgError::unsupported("bundle repository").into())
94 94 } else {
95 95 Err(RepoError::NotFound {
96 96 at: root.to_owned(),
97 97 })
98 98 }
99 99 } else {
100 100 let root = Self::find_repo_root()?;
101 101 Self::new_at_path(root, config)
102 102 }
103 103 }
104 104
    /// To be called after checking that `.hg` is a sub-directory
    ///
    /// Resolves sharing via `.hg/sharedpath`, loads requirements (including
    /// the store's when share-safe), combines repository `hgrc` files with
    /// the given config, and finally checks that all requirements are
    /// supported.
    fn new_at_path(
        working_directory: PathBuf,
        config: &Config,
    ) -> Result<Self, RepoError> {
        let dot_hg = working_directory.join(".hg");

        // Repository-level config files, combined with `config` below.
        let mut repo_config_files = Vec::new();
        repo_config_files.push(dot_hg.join("hgrc"));
        repo_config_files.push(dot_hg.join("hgrc-not-shared"));

        let hg_vfs = Vfs { base: &dot_hg };
        let mut reqs = requirements::load_if_exists(hg_vfs)?;
        let relative =
            reqs.contains(requirements::RELATIVE_SHARED_REQUIREMENT);
        let shared =
            reqs.contains(requirements::SHARED_REQUIREMENT) || relative;

        // From `mercurial/localrepo.py`:
        //
        // if .hg/requires contains the sharesafe requirement, it means
        // there exists a `.hg/store/requires` too and we should read it
        // NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
        // is present. We never write SHARESAFE_REQUIREMENT for a repo if store
        // is not present, refer checkrequirementscompat() for that
        //
        // However, if SHARESAFE_REQUIREMENT is not present, it means that the
        // repository was shared the old way. We check the share source
        // .hg/requires for SHARESAFE_REQUIREMENT to detect whether the
        // current repository needs to be reshared
        let share_safe = reqs.contains(requirements::SHARESAFE_REQUIREMENT);

        let store_path;
        if !shared {
            store_path = dot_hg.join("store");
        } else {
            // Shared repository: the store lives under the share source,
            // whose path is stored (newline-terminated) in `.hg/sharedpath`.
            let bytes = hg_vfs.read("sharedpath")?;
            let mut shared_path =
                get_path_from_bytes(bytes.trim_end_matches(|b| b == b'\n'))
                    .to_owned();
            if relative {
                // A relative share path is resolved against `.hg`.
                shared_path = dot_hg.join(shared_path)
            }
            if !is_dir(&shared_path)? {
                return Err(HgError::corrupted(format!(
                    ".hg/sharedpath points to nonexistent directory {}",
                    shared_path.display()
                ))
                .into());
            }

            store_path = shared_path.join("store");

            let source_is_share_safe =
                requirements::load(Vfs { base: &shared_path })?
                    .contains(requirements::SHARESAFE_REQUIREMENT);

            // Both sides of the share must agree on share-safe.
            if share_safe != source_is_share_safe {
                return Err(HgError::unsupported("share-safe mismatch").into());
            }

            if share_safe {
                // Also read the share source's hgrc; inserted first —
                // presumably lowest precedence, confirm in
                // `combine_with_repo`.
                repo_config_files.insert(0, shared_path.join("hgrc"))
            }
        }
        if share_safe {
            // Share-safe implies `.hg/store/requires` exists (see above).
            reqs.extend(requirements::load(Vfs { base: &store_path })?);
        }

        // HGRCSKIPREPO disables reading repository-level config files.
        let repo_config = if std::env::var_os("HGRCSKIPREPO").is_none() {
            config.combine_with_repo(&repo_config_files)?
        } else {
            config.clone()
        };

        let repo = Self {
            requirements: reqs,
            working_directory,
            store: store_path,
            dot_hg,
            config: repo_config,
            dirstate_parents: LazyCell::new(),
            dirstate_data_file_uuid: LazyCell::new(),
            dirstate_map: LazyCell::new(),
            changelog: LazyCell::new(),
            manifestlog: LazyCell::new(),
        };

        // Fail early on unsupported requirements.
        requirements::check(&repo)?;

        Ok(repo)
    }
197 197
    /// Root path of the working directory.
    pub fn working_directory_path(&self) -> &Path {
        &self.working_directory
    }

    /// The set of requirements this repository was opened with.
    pub fn requirements(&self) -> &HashSet<String> {
        &self.requirements
    }

    /// The combined (caller-provided + repository) configuration.
    pub fn config(&self) -> &Config {
        &self.config
    }

    /// For accessing repository files (in `.hg`), except for the store
    /// (`.hg/store`).
    pub fn hg_vfs(&self) -> Vfs<'_> {
        Vfs { base: &self.dot_hg }
    }

    /// For accessing repository store files (in `.hg/store`)
    pub fn store_vfs(&self) -> Vfs<'_> {
        Vfs { base: &self.store }
    }

    /// For accessing the working copy
    pub fn working_directory_vfs(&self) -> Vfs<'_> {
        Vfs {
            base: &self.working_directory,
        }
    }
227 227
    /// Run `f` while holding the working-copy lock ("wlock"), failing
    /// immediately instead of waiting when the lock is already held.
    pub fn try_with_wlock_no_wait<R>(
        &self,
        f: impl FnOnce() -> R,
    ) -> Result<R, LockError> {
        try_with_lock_no_wait(self.hg_vfs(), "wlock", f)
    }

    /// Whether this repository uses the dirstate-v2 format.
    pub fn has_dirstate_v2(&self) -> bool {
        self.requirements
            .contains(requirements::DIRSTATE_V2_REQUIREMENT)
    }

    /// Whether the sparse requirement is present.
    pub fn has_sparse(&self) -> bool {
        self.requirements.contains(requirements::SPARSE_REQUIREMENT)
    }

    /// Whether the narrow requirement is present.
    pub fn has_narrow(&self) -> bool {
        self.requirements.contains(requirements::NARROW_REQUIREMENT)
    }

    /// Whether the persistent-nodemap requirement is present.
    pub fn has_nodemap(&self) -> bool {
        self.requirements
            .contains(requirements::NODEMAP_REQUIREMENT)
    }
252 252
253 253 fn dirstate_file_contents(&self) -> Result<Vec<u8>, HgError> {
254 254 Ok(self
255 255 .hg_vfs()
256 256 .read("dirstate")
257 257 .io_not_found_as_none()?
258 258 .unwrap_or(Vec::new()))
259 259 }
260 260
    /// The working directory's parents, lazily read from the dirstate and
    /// cached in `self.dirstate_parents`.
    pub fn dirstate_parents(&self) -> Result<DirstateParents, HgError> {
        Ok(*self
            .dirstate_parents
            .get_or_init(|| self.read_dirstate_parents())?)
    }
266 266
    /// Read the parents from `.hg/dirstate`, also caching the dirstate-v2
    /// data file UUID as a side effect when the docket is parsed.
    fn read_dirstate_parents(&self) -> Result<DirstateParents, HgError> {
        let dirstate = self.dirstate_file_contents()?;
        let parents = if dirstate.is_empty() {
            if self.has_dirstate_v2() {
                // No docket means there is no data file either.
                self.dirstate_data_file_uuid.set(None);
            }
            DirstateParents::NULL
        } else if self.has_dirstate_v2() {
            // In v2, `.hg/dirstate` is a docket pointing to a data file.
            let docket =
                crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
            self.dirstate_data_file_uuid
                .set(Some(docket.uuid.to_owned()));
            docket.parents()
        } else {
            // In v1 the parents are stored in the dirstate file itself.
            crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?
                .clone()
        };
        self.dirstate_parents.set(parents);
        Ok(parents)
    }
287 287
    /// Read the dirstate-v2 data file UUID from the docket, caching the
    /// parents as a side effect. Returns `None` when the dirstate is empty.
    ///
    /// Panics when called on a repository without the dirstate-v2
    /// requirement.
    fn read_dirstate_data_file_uuid(
        &self,
    ) -> Result<Option<Vec<u8>>, HgError> {
        assert!(
            self.has_dirstate_v2(),
            "accessing dirstate data file ID without dirstate-v2"
        );
        let dirstate = self.dirstate_file_contents()?;
        if dirstate.is_empty() {
            self.dirstate_parents.set(DirstateParents::NULL);
            Ok(None)
        } else {
            let docket =
                crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
            self.dirstate_parents.set(docket.parents());
            Ok(Some(docket.uuid.to_owned()))
        }
    }
306 306
307 307 fn new_dirstate_map(&self) -> Result<OwningDirstateMap, DirstateError> {
308 if self.has_dirstate_v2() {
309 self.read_docket_and_data_file()
310 } else {
311 let dirstate_file_contents = self.dirstate_file_contents()?;
312 if dirstate_file_contents.is_empty() {
313 self.dirstate_parents.set(DirstateParents::NULL);
314 Ok(OwningDirstateMap::new_empty(Vec::new()))
315 } else {
316 let (map, parents) =
317 OwningDirstateMap::new_v1(dirstate_file_contents)?;
318 self.dirstate_parents.set(parents);
319 Ok(map)
320 }
321 }
322 }
323
324 fn read_docket_and_data_file(
325 &self,
326 ) -> Result<OwningDirstateMap, DirstateError> {
308 327 let dirstate_file_contents = self.dirstate_file_contents()?;
309 328 if dirstate_file_contents.is_empty() {
310 329 self.dirstate_parents.set(DirstateParents::NULL);
311 if self.has_dirstate_v2() {
312 self.dirstate_data_file_uuid.set(None);
313 }
314 Ok(OwningDirstateMap::new_empty(Vec::new()))
315 } else if self.has_dirstate_v2() {
316 let docket = crate::dirstate_tree::on_disk::read_docket(
317 &dirstate_file_contents,
318 )?;
319 self.dirstate_parents.set(docket.parents());
320 self.dirstate_data_file_uuid
321 .set(Some(docket.uuid.to_owned()));
322 let data_size = docket.data_size();
323 let metadata = docket.tree_metadata();
324 let mut map =
325 if crate::vfs::is_on_nfs_mount(docket.data_filename()) {
326 // Don't mmap on NFS to prevent `SIGBUS` error on deletion
327 OwningDirstateMap::new_v2(
328 self.hg_vfs().read(docket.data_filename())?,
329 data_size,
330 metadata,
331 )
332 } else if let Some(data_mmap) = self
333 .hg_vfs()
334 .mmap_open(docket.data_filename())
335 .io_not_found_as_none()?
336 {
337 OwningDirstateMap::new_v2(data_mmap, data_size, metadata)
338 } else {
339 OwningDirstateMap::new_v2(Vec::new(), data_size, metadata)
340 }?;
330 self.dirstate_data_file_uuid.set(None);
331 return Ok(OwningDirstateMap::new_empty(Vec::new()));
332 }
333 let docket = crate::dirstate_tree::on_disk::read_docket(
334 &dirstate_file_contents,
335 )?;
336 self.dirstate_parents.set(docket.parents());
337 self.dirstate_data_file_uuid
338 .set(Some(docket.uuid.to_owned()));
339 let data_size = docket.data_size();
340 let metadata = docket.tree_metadata();
341 let mut map = if crate::vfs::is_on_nfs_mount(docket.data_filename()) {
342 // Don't mmap on NFS to prevent `SIGBUS` error on deletion
343 OwningDirstateMap::new_v2(
344 self.hg_vfs().read(docket.data_filename())?,
345 data_size,
346 metadata,
347 )
348 } else if let Some(data_mmap) = self
349 .hg_vfs()
350 .mmap_open(docket.data_filename())
351 .io_not_found_as_none()?
352 {
353 OwningDirstateMap::new_v2(data_mmap, data_size, metadata)
354 } else {
355 OwningDirstateMap::new_v2(Vec::new(), data_size, metadata)
356 }?;
341 357
342 let write_mode_config = self
343 .config()
344 .get_str(b"devel", b"dirstate.v2.data_update_mode")
345 .unwrap_or(Some("auto"))
346 .unwrap_or("auto"); // don't bother for devel options
347 let write_mode = match write_mode_config {
348 "auto" => DirstateMapWriteMode::Auto,
349 "force-new" => DirstateMapWriteMode::ForceNewDataFile,
350 "force-append" => DirstateMapWriteMode::ForceAppend,
351 _ => DirstateMapWriteMode::Auto,
352 };
358 let write_mode_config = self
359 .config()
360 .get_str(b"devel", b"dirstate.v2.data_update_mode")
361 .unwrap_or(Some("auto"))
362 .unwrap_or("auto"); // don't bother for devel options
363 let write_mode = match write_mode_config {
364 "auto" => DirstateMapWriteMode::Auto,
365 "force-new" => DirstateMapWriteMode::ForceNewDataFile,
366 "force-append" => DirstateMapWriteMode::ForceAppend,
367 _ => DirstateMapWriteMode::Auto,
368 };
353 369
354 map.with_dmap_mut(|m| m.set_write_mode(write_mode));
370 map.with_dmap_mut(|m| m.set_write_mode(write_mode));
355 371
356 Ok(map)
357 } else {
358 let (map, parents) =
359 OwningDirstateMap::new_v1(dirstate_file_contents)?;
360 self.dirstate_parents.set(parents);
361 Ok(map)
362 }
372 Ok(map)
363 373 }
364 374
    /// Shared borrow of the (lazily-loaded) dirstate map.
    pub fn dirstate_map(
        &self,
    ) -> Result<Ref<OwningDirstateMap>, DirstateError> {
        self.dirstate_map.get_or_init(|| self.new_dirstate_map())
    }

    /// Mutable borrow of the (lazily-loaded) dirstate map.
    pub fn dirstate_map_mut(
        &self,
    ) -> Result<RefMut<OwningDirstateMap>, DirstateError> {
        self.dirstate_map
            .get_mut_or_init(|| self.new_dirstate_map())
    }
377 387
    /// Open the changelog, enabling the persistent nodemap when required.
    fn new_changelog(&self) -> Result<Changelog, HgError> {
        Changelog::open(&self.store_vfs(), self.has_nodemap())
    }

    /// Shared borrow of the (lazily-opened) changelog.
    pub fn changelog(&self) -> Result<Ref<Changelog>, HgError> {
        self.changelog.get_or_init(|| self.new_changelog())
    }

    /// Mutable borrow of the (lazily-opened) changelog.
    pub fn changelog_mut(&self) -> Result<RefMut<Changelog>, HgError> {
        self.changelog.get_mut_or_init(|| self.new_changelog())
    }

    /// Open the manifest log, enabling the persistent nodemap when required.
    fn new_manifestlog(&self) -> Result<Manifestlog, HgError> {
        Manifestlog::open(&self.store_vfs(), self.has_nodemap())
    }

    /// Shared borrow of the (lazily-opened) manifest log.
    pub fn manifestlog(&self) -> Result<Ref<Manifestlog>, HgError> {
        self.manifestlog.get_or_init(|| self.new_manifestlog())
    }

    /// Mutable borrow of the (lazily-opened) manifest log.
    pub fn manifestlog_mut(&self) -> Result<RefMut<Manifestlog>, HgError> {
        self.manifestlog.get_mut_or_init(|| self.new_manifestlog())
    }
401 411
    /// Returns the manifest of the *changeset* with the given node ID
    pub fn manifest_for_node(
        &self,
        node: impl Into<NodePrefix>,
    ) -> Result<Manifest, RevlogError> {
        // Changeset -> its manifest node -> manifest data.
        self.manifestlog()?.data_for_node(
            self.changelog()?
                .data_for_node(node.into())?
                .manifest_node()?
                .into(),
        )
    }
414 424
    /// Returns the manifest of the *changeset* with the given revision number
    pub fn manifest_for_rev(
        &self,
        revision: Revision,
    ) -> Result<Manifest, RevlogError> {
        // Changeset -> its manifest node -> manifest data.
        self.manifestlog()?.data_for_node(
            self.changelog()?
                .data_for_rev(revision)?
                .manifest_node()?
                .into(),
        )
    }
427 437
428 438 pub fn has_subrepos(&self) -> Result<bool, DirstateError> {
429 439 if let Some(entry) = self.dirstate_map()?.get(HgPath::new(".hgsub"))? {
430 440 Ok(entry.tracked())
431 441 } else {
432 442 Ok(false)
433 443 }
434 444 }
435 445
    /// Open the filelog (per-file history revlog) for the given tracked path.
    pub fn filelog(&self, path: &HgPath) -> Result<Filelog, HgError> {
        Filelog::open(self, path)
    }
439 449
    /// Write to disk any updates that were made through `dirstate_map_mut`.
    ///
    /// The "wlock" must be held while calling this.
    /// See for example `try_with_wlock_no_wait`.
    ///
    /// TODO: have a `WritableRepo` type only accessible while holding the
    /// lock?
    pub fn write_dirstate(&self) -> Result<(), DirstateError> {
        let map = self.dirstate_map()?;
        // TODO: Maintain a `DirstateMap::dirty` flag, and return early here if
        // it’s unset
        let parents = self.dirstate_parents()?;
        let (packed_dirstate, old_uuid_to_remove) = if self.has_dirstate_v2() {
            let uuid_opt = self
                .dirstate_data_file_uuid
                .get_or_init(|| self.read_dirstate_data_file_uuid())?;
            let uuid_opt = uuid_opt.as_ref();
            // Without an existing data file we must create a fresh one.
            let write_mode = if uuid_opt.is_some() {
                DirstateMapWriteMode::Auto
            } else {
                DirstateMapWriteMode::ForceNewDataFile
            };
            let (data, tree_metadata, append, old_data_size) =
                map.pack_v2(write_mode)?;

            // Reuse the uuid, or generate a new one, keeping the old for
            // deletion.
            let (uuid, old_uuid) = match uuid_opt {
                Some(uuid) => {
                    let as_str = std::str::from_utf8(uuid)
                        .map_err(|_| {
                            HgError::corrupted(
                                "non-UTF-8 dirstate data file ID",
                            )
                        })?
                        .to_owned();
                    if append {
                        (as_str, None)
                    } else {
                        // Rewriting from scratch: new ID, delete the old
                        // data file afterwards.
                        (DirstateDocket::new_uid(), Some(as_str))
                    }
                }
                None => (DirstateDocket::new_uid(), None),
            };

            let data_filename = format!("dirstate.{}", uuid);
            let data_filename = self.hg_vfs().join(data_filename);
            let mut options = std::fs::OpenOptions::new();
            options.write(true);

            // Why are we not using the O_APPEND flag when appending?
            //
            // - O_APPEND makes it trickier to deal with garbage at the end of
            //   the file, left by a previous uncommitted transaction. By
            //   starting the write at [old_data_size] we make sure we erase
            //   all such garbage.
            //
            // - O_APPEND requires to special-case 0-byte writes, whereas we
            //   don't need that.
            //
            // - Some OSes have bugs in implementation O_APPEND:
            //   revlog.py talks about a Solaris bug, but we also saw some ZFS
            //   bug: https://github.com/openzfs/zfs/pull/3124,
            //   https://github.com/openzfs/zfs/issues/13370
            //
            if !append {
                log::trace!("creating a new dirstate data file");
                options.create_new(true);
            } else {
                log::trace!("appending to the dirstate data file");
            }

            let data_size = (|| {
                // TODO: loop and try another random ID if !append and this
                // returns `ErrorKind::AlreadyExists`? Collision chance of two
                // random IDs is one in 2**32
                let mut file = options.open(&data_filename)?;
                if append {
                    // Start at the last committed size, overwriting any
                    // garbage a previous uncommitted transaction left.
                    file.seek(SeekFrom::Start(old_data_size as u64))?;
                }
                file.write_all(&data)?;
                file.flush()?;
                // Current position == size of the valid data on disk.
                file.seek(SeekFrom::Current(0))
            })()
            .when_writing_file(&data_filename)?;

            let packed_dirstate = DirstateDocket::serialize(
                parents,
                tree_metadata,
                data_size,
                uuid.as_bytes(),
            )
            .map_err(|_: std::num::TryFromIntError| {
                HgError::corrupted("overflow in dirstate docket serialization")
            })?;

            (packed_dirstate, old_uuid)
        } else {
            (map.pack_v1(parents)?, None)
        };

        // Commit point: atomically replace the docket (v2) or the full
        // dirstate (v1).
        let vfs = self.hg_vfs();
        vfs.atomic_write("dirstate", &packed_dirstate)?;
        if let Some(uuid) = old_uuid_to_remove {
            // Remove the old data file after the new docket pointing to the
            // new data file was written.
            vfs.remove_file(format!("dirstate.{}", uuid))?;
        }
        Ok(())
    }
550 560 }
551 561
/// Lazily-initialized component of `Repo` with interior mutability
///
/// This differs from `OnceCell` in that the value can still be "deinitialized"
/// later by setting its inner `Option` to `None`. It also takes the
/// initialization function as an argument when the value is requested, not
/// when the instance is created.
struct LazyCell<T> {
    value: RefCell<Option<T>>,
}

impl<T> LazyCell<T> {
    /// Create an empty (uninitialized) cell.
    fn new() -> Self {
        Self {
            value: RefCell::new(None),
        }
    }

    /// Store a value, replacing any previously-initialized one.
    fn set(&self, value: T) {
        *self.value.borrow_mut() = Some(value)
    }

    /// Borrow the value, running `init` first if it was never set.
    ///
    /// `init` is called at most once, so `FnOnce` suffices — a strictly
    /// more general bound than the previous `Fn` (all existing callers
    /// still compile).
    fn get_or_init<E>(
        &self,
        init: impl FnOnce() -> Result<T, E>,
    ) -> Result<Ref<T>, E> {
        let mut borrowed = self.value.borrow();
        if borrowed.is_none() {
            drop(borrowed);
            // Only use `borrow_mut` if it is really needed to avoid panic in
            // case there is another outstanding borrow but mutation is not
            // needed.
            *self.value.borrow_mut() = Some(init()?);
            borrowed = self.value.borrow()
        }
        Ok(Ref::map(borrowed, |option| option.as_ref().unwrap()))
    }

    /// Mutably borrow the value, running `init` first if it was never set.
    fn get_mut_or_init<E>(
        &self,
        init: impl FnOnce() -> Result<T, E>,
    ) -> Result<RefMut<T>, E> {
        let mut borrowed = self.value.borrow_mut();
        if borrowed.is_none() {
            *borrowed = Some(init()?);
        }
        Ok(RefMut::map(borrowed, |option| option.as_mut().unwrap()))
    }
}
General Comments 0
You need to be logged in to leave comments. Login now