dirstate-v2: don't mmap the data file when on NFS...
Raphaël Gomès
r51075:be019ac8 stable
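This change teaches `Repo::new_dirstate_map` to skip `mmap` for the dirstate-v2 data file when that file lives on an NFS mount, reading it into memory instead, so that deletion of the file on the server cannot later surface as a `SIGBUS` through a stale mapping. Below is a standalone sketch of that strategy, simplified from the hunks that follow: it assumes the `libc` and `memmap2` crates and Linux's statfs(2), and the `DataFile`/`load_data_file` names are illustrative, not part of the patch.

use std::ffi::CString;
use std::os::unix::prelude::*;
use std::path::Path;

// Sketch only: detect an NFS mount the way the patch does, via statfs(2).
// Linux-specific; assumes the `libc` crate.
fn is_on_nfs_mount(path: &Path) -> bool {
    let c_path = match CString::new(path.as_os_str().as_bytes()) {
        Ok(p) => p,
        Err(_) => return false,
    };
    unsafe {
        let mut buf: libc::statfs = std::mem::zeroed();
        libc::statfs(c_path.as_ptr(), &mut buf) == 0
            && buf.f_type as u32 == libc::NFS_SUPER_MAGIC as u32
    }
}

// Either an owned buffer (NFS) or a memory map (local disk); both give
// access to `&[u8]`, which is all the dirstate map constructor needs.
enum DataFile {
    InMemory(Vec<u8>),
    Mapped(memmap2::Mmap),
}

fn load_data_file(path: &Path) -> std::io::Result<DataFile> {
    if is_on_nfs_mount(path) {
        // Copy the bytes up front; a later unlink on the server is then harmless.
        Ok(DataFile::InMemory(std::fs::read(path)?))
    } else {
        // Same safety caveats as `Vfs::mmap_open` in the second file below.
        let file = std::fs::File::open(path)?;
        let mmap = unsafe { memmap2::MmapOptions::new().map(&file)? };
        Ok(DataFile::Mapped(mmap))
    }
}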
rust/hg-core/src/repo.rs
@@ -1,567 +1,574
1 1 use crate::changelog::Changelog;
2 2 use crate::config::{Config, ConfigError, ConfigParseError};
3 3 use crate::dirstate::DirstateParents;
4 4 use crate::dirstate_tree::on_disk::Docket as DirstateDocket;
5 5 use crate::dirstate_tree::owning::OwningDirstateMap;
6 6 use crate::errors::HgResultExt;
7 7 use crate::errors::{HgError, IoResultExt};
8 8 use crate::lock::{try_with_lock_no_wait, LockError};
9 9 use crate::manifest::{Manifest, Manifestlog};
10 10 use crate::revlog::filelog::Filelog;
11 11 use crate::revlog::revlog::RevlogError;
12 12 use crate::utils::files::get_path_from_bytes;
13 13 use crate::utils::hg_path::HgPath;
14 14 use crate::utils::SliceExt;
15 15 use crate::vfs::{is_dir, is_file, Vfs};
16 16 use crate::{requirements, NodePrefix};
17 17 use crate::{DirstateError, Revision};
18 18 use std::cell::{Ref, RefCell, RefMut};
19 19 use std::collections::HashSet;
20 20 use std::io::Seek;
21 21 use std::io::SeekFrom;
22 22 use std::io::Write as IoWrite;
23 23 use std::path::{Path, PathBuf};
24 24
25 25 /// A repository on disk
26 26 pub struct Repo {
27 27 working_directory: PathBuf,
28 28 dot_hg: PathBuf,
29 29 store: PathBuf,
30 30 requirements: HashSet<String>,
31 31 config: Config,
32 32 dirstate_parents: LazyCell<DirstateParents>,
33 33 dirstate_data_file_uuid: LazyCell<Option<Vec<u8>>>,
34 34 dirstate_map: LazyCell<OwningDirstateMap>,
35 35 changelog: LazyCell<Changelog>,
36 36 manifestlog: LazyCell<Manifestlog>,
37 37 }
38 38
39 39 #[derive(Debug, derive_more::From)]
40 40 pub enum RepoError {
41 41 NotFound {
42 42 at: PathBuf,
43 43 },
44 44 #[from]
45 45 ConfigParseError(ConfigParseError),
46 46 #[from]
47 47 Other(HgError),
48 48 }
49 49
50 50 impl From<ConfigError> for RepoError {
51 51 fn from(error: ConfigError) -> Self {
52 52 match error {
53 53 ConfigError::Parse(error) => error.into(),
54 54 ConfigError::Other(error) => error.into(),
55 55 }
56 56 }
57 57 }
58 58
59 59 impl Repo {
60 60 /// Tries to find the nearest repository root in the current working
61 61 /// directory or its ancestors
62 62 pub fn find_repo_root() -> Result<PathBuf, RepoError> {
63 63 let current_directory = crate::utils::current_dir()?;
64 64 // ancestors() is inclusive: it first yields `current_directory`
65 65 // as-is.
66 66 for ancestor in current_directory.ancestors() {
67 67 if is_dir(ancestor.join(".hg"))? {
68 68 return Ok(ancestor.to_path_buf());
69 69 }
70 70 }
71 71 return Err(RepoError::NotFound {
72 72 at: current_directory,
73 73 });
74 74 }
75 75
76 76 /// Find a repository, either at the given path (which must contain a `.hg`
77 77 /// sub-directory) or by searching the current directory and its
78 78 /// ancestors.
79 79 ///
80 80 /// A method with two very different "modes" like this is usually a code
81 81 /// smell calling for two methods, but in this case an `Option` is what rhg
82 82 /// sub-commands get from Clap for the `-R` / `--repository` CLI argument.
83 83 /// Having two methods would just move that `if` to almost all callers.
84 84 pub fn find(
85 85 config: &Config,
86 86 explicit_path: Option<PathBuf>,
87 87 ) -> Result<Self, RepoError> {
88 88 if let Some(root) = explicit_path {
89 89 if is_dir(root.join(".hg"))? {
90 90 Self::new_at_path(root.to_owned(), config)
91 91 } else if is_file(&root)? {
92 92 Err(HgError::unsupported("bundle repository").into())
93 93 } else {
94 94 Err(RepoError::NotFound {
95 95 at: root.to_owned(),
96 96 })
97 97 }
98 98 } else {
99 99 let root = Self::find_repo_root()?;
100 100 Self::new_at_path(root, config)
101 101 }
102 102 }
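// Editorial illustration, not part of this file: the two modes of `find`
// as an rhg sub-command sees them (`config` and the Clap-provided
// `repo_arg: Option<PathBuf>` are assumed to exist in the caller):
//
//     let repo = Repo::find(config, repo_arg)?; // honours -R when given
//     let repo = Repo::find(config, None)?;     // else search CWD and its ancestors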
103 103
104 104 /// To be called after checking that `.hg` is a sub-directory
105 105 fn new_at_path(
106 106 working_directory: PathBuf,
107 107 config: &Config,
108 108 ) -> Result<Self, RepoError> {
109 109 let dot_hg = working_directory.join(".hg");
110 110
111 111 let mut repo_config_files = Vec::new();
112 112 repo_config_files.push(dot_hg.join("hgrc"));
113 113 repo_config_files.push(dot_hg.join("hgrc-not-shared"));
114 114
115 115 let hg_vfs = Vfs { base: &dot_hg };
116 116 let mut reqs = requirements::load_if_exists(hg_vfs)?;
117 117 let relative =
118 118 reqs.contains(requirements::RELATIVE_SHARED_REQUIREMENT);
119 119 let shared =
120 120 reqs.contains(requirements::SHARED_REQUIREMENT) || relative;
121 121
122 122 // From `mercurial/localrepo.py`:
123 123 //
124 124 // if .hg/requires contains the sharesafe requirement, it means
125 125 // there exists a `.hg/store/requires` too and we should read it
126 126 // NOTE: the presence of SHARESAFE_REQUIREMENT implies that a store
127 127 // requirement is present. We never write SHARESAFE_REQUIREMENT for a repo
128 128 // whose store is not present; refer to checkrequirementscompat() for that
129 129 //
130 130 // However, if SHARESAFE_REQUIREMENT is not present, it means that the
131 131 // repository was shared the old way. We check the share source
132 132 // .hg/requires for SHARESAFE_REQUIREMENT to detect whether the
133 133 // current repository needs to be reshared
134 134 let share_safe = reqs.contains(requirements::SHARESAFE_REQUIREMENT);
135 135
136 136 let store_path;
137 137 if !shared {
138 138 store_path = dot_hg.join("store");
139 139 } else {
140 140 let bytes = hg_vfs.read("sharedpath")?;
141 141 let mut shared_path =
142 142 get_path_from_bytes(bytes.trim_end_matches(|b| b == b'\n'))
143 143 .to_owned();
144 144 if relative {
145 145 shared_path = dot_hg.join(shared_path)
146 146 }
147 147 if !is_dir(&shared_path)? {
148 148 return Err(HgError::corrupted(format!(
149 149 ".hg/sharedpath points to nonexistent directory {}",
150 150 shared_path.display()
151 151 ))
152 152 .into());
153 153 }
154 154
155 155 store_path = shared_path.join("store");
156 156
157 157 let source_is_share_safe =
158 158 requirements::load(Vfs { base: &shared_path })?
159 159 .contains(requirements::SHARESAFE_REQUIREMENT);
160 160
161 161 if share_safe != source_is_share_safe {
162 162 return Err(HgError::unsupported("share-safe mismatch").into());
163 163 }
164 164
165 165 if share_safe {
166 166 repo_config_files.insert(0, shared_path.join("hgrc"))
167 167 }
168 168 }
169 169 if share_safe {
170 170 reqs.extend(requirements::load(Vfs { base: &store_path })?);
171 171 }
172 172
173 173 let repo_config = if std::env::var_os("HGRCSKIPREPO").is_none() {
174 174 config.combine_with_repo(&repo_config_files)?
175 175 } else {
176 176 config.clone()
177 177 };
178 178
179 179 let repo = Self {
180 180 requirements: reqs,
181 181 working_directory,
182 182 store: store_path,
183 183 dot_hg,
184 184 config: repo_config,
185 185 dirstate_parents: LazyCell::new(),
186 186 dirstate_data_file_uuid: LazyCell::new(),
187 187 dirstate_map: LazyCell::new(),
188 188 changelog: LazyCell::new(),
189 189 manifestlog: LazyCell::new(),
190 190 };
191 191
192 192 requirements::check(&repo)?;
193 193
194 194 Ok(repo)
195 195 }
196 196
197 197 pub fn working_directory_path(&self) -> &Path {
198 198 &self.working_directory
199 199 }
200 200
201 201 pub fn requirements(&self) -> &HashSet<String> {
202 202 &self.requirements
203 203 }
204 204
205 205 pub fn config(&self) -> &Config {
206 206 &self.config
207 207 }
208 208
209 209 /// For accessing repository files (in `.hg`), except for the store
210 210 /// (`.hg/store`).
211 211 pub fn hg_vfs(&self) -> Vfs<'_> {
212 212 Vfs { base: &self.dot_hg }
213 213 }
214 214
215 215 /// For accessing repository store files (in `.hg/store`)
216 216 pub fn store_vfs(&self) -> Vfs<'_> {
217 217 Vfs { base: &self.store }
218 218 }
219 219
220 220 /// For accessing the working copy
221 221 pub fn working_directory_vfs(&self) -> Vfs<'_> {
222 222 Vfs {
223 223 base: &self.working_directory,
224 224 }
225 225 }
226 226
227 227 pub fn try_with_wlock_no_wait<R>(
228 228 &self,
229 229 f: impl FnOnce() -> R,
230 230 ) -> Result<R, LockError> {
231 231 try_with_lock_no_wait(self.hg_vfs(), "wlock", f)
232 232 }
233 233
234 234 pub fn has_dirstate_v2(&self) -> bool {
235 235 self.requirements
236 236 .contains(requirements::DIRSTATE_V2_REQUIREMENT)
237 237 }
238 238
239 239 pub fn has_sparse(&self) -> bool {
240 240 self.requirements.contains(requirements::SPARSE_REQUIREMENT)
241 241 }
242 242
243 243 pub fn has_narrow(&self) -> bool {
244 244 self.requirements.contains(requirements::NARROW_REQUIREMENT)
245 245 }
246 246
247 247 pub fn has_nodemap(&self) -> bool {
248 248 self.requirements
249 249 .contains(requirements::NODEMAP_REQUIREMENT)
250 250 }
251 251
252 252 fn dirstate_file_contents(&self) -> Result<Vec<u8>, HgError> {
253 253 Ok(self
254 254 .hg_vfs()
255 255 .read("dirstate")
256 256 .io_not_found_as_none()?
257 257 .unwrap_or(Vec::new()))
258 258 }
259 259
260 260 pub fn dirstate_parents(&self) -> Result<DirstateParents, HgError> {
261 261 Ok(*self
262 262 .dirstate_parents
263 263 .get_or_init(|| self.read_dirstate_parents())?)
264 264 }
265 265
266 266 fn read_dirstate_parents(&self) -> Result<DirstateParents, HgError> {
267 267 let dirstate = self.dirstate_file_contents()?;
268 268 let parents = if dirstate.is_empty() {
269 269 if self.has_dirstate_v2() {
270 270 self.dirstate_data_file_uuid.set(None);
271 271 }
272 272 DirstateParents::NULL
273 273 } else if self.has_dirstate_v2() {
274 274 let docket =
275 275 crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
276 276 self.dirstate_data_file_uuid
277 277 .set(Some(docket.uuid.to_owned()));
278 278 docket.parents()
279 279 } else {
280 280 crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?
281 281 .clone()
282 282 };
283 283 self.dirstate_parents.set(parents);
284 284 Ok(parents)
285 285 }
286 286
287 287 fn read_dirstate_data_file_uuid(
288 288 &self,
289 289 ) -> Result<Option<Vec<u8>>, HgError> {
290 290 assert!(
291 291 self.has_dirstate_v2(),
292 292 "accessing dirstate data file ID without dirstate-v2"
293 293 );
294 294 let dirstate = self.dirstate_file_contents()?;
295 295 if dirstate.is_empty() {
296 296 self.dirstate_parents.set(DirstateParents::NULL);
297 297 Ok(None)
298 298 } else {
299 299 let docket =
300 300 crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
301 301 self.dirstate_parents.set(docket.parents());
302 302 Ok(Some(docket.uuid.to_owned()))
303 303 }
304 304 }
305 305
306 306 fn new_dirstate_map(&self) -> Result<OwningDirstateMap, DirstateError> {
307 307 let dirstate_file_contents = self.dirstate_file_contents()?;
308 308 if dirstate_file_contents.is_empty() {
309 309 self.dirstate_parents.set(DirstateParents::NULL);
310 310 if self.has_dirstate_v2() {
311 311 self.dirstate_data_file_uuid.set(None);
312 312 }
313 313 Ok(OwningDirstateMap::new_empty(Vec::new()))
314 314 } else if self.has_dirstate_v2() {
315 315 let docket = crate::dirstate_tree::on_disk::read_docket(
316 316 &dirstate_file_contents,
317 317 )?;
318 318 self.dirstate_parents.set(docket.parents());
319 319 self.dirstate_data_file_uuid
320 320 .set(Some(docket.uuid.to_owned()));
321 321 let data_size = docket.data_size();
322 322 let metadata = docket.tree_metadata();
323 if let Some(data_mmap) = self
323 if crate::vfs::is_on_nfs_mount(docket.data_filename()) {
324 // Don't mmap on NFS to prevent a `SIGBUS` error on deletion
325 OwningDirstateMap::new_v2(
326 self.hg_vfs().read(docket.data_filename())?,
327 data_size,
328 metadata,
329 )
330 } else if let Some(data_mmap) = self
324 331 .hg_vfs()
325 332 .mmap_open(docket.data_filename())
326 333 .io_not_found_as_none()?
327 334 {
328 335 OwningDirstateMap::new_v2(data_mmap, data_size, metadata)
329 336 } else {
330 337 OwningDirstateMap::new_v2(Vec::new(), data_size, metadata)
331 338 }
332 339 } else {
333 340 let (map, parents) =
334 341 OwningDirstateMap::new_v1(dirstate_file_contents)?;
335 342 self.dirstate_parents.set(parents);
336 343 Ok(map)
337 344 }
338 345 }
339 346
340 347 pub fn dirstate_map(
341 348 &self,
342 349 ) -> Result<Ref<OwningDirstateMap>, DirstateError> {
343 350 self.dirstate_map.get_or_init(|| self.new_dirstate_map())
344 351 }
345 352
346 353 pub fn dirstate_map_mut(
347 354 &self,
348 355 ) -> Result<RefMut<OwningDirstateMap>, DirstateError> {
349 356 self.dirstate_map
350 357 .get_mut_or_init(|| self.new_dirstate_map())
351 358 }
352 359
353 360 fn new_changelog(&self) -> Result<Changelog, HgError> {
354 361 Changelog::open(&self.store_vfs(), self.has_nodemap())
355 362 }
356 363
357 364 pub fn changelog(&self) -> Result<Ref<Changelog>, HgError> {
358 365 self.changelog.get_or_init(|| self.new_changelog())
359 366 }
360 367
361 368 pub fn changelog_mut(&self) -> Result<RefMut<Changelog>, HgError> {
362 369 self.changelog.get_mut_or_init(|| self.new_changelog())
363 370 }
364 371
365 372 fn new_manifestlog(&self) -> Result<Manifestlog, HgError> {
366 373 Manifestlog::open(&self.store_vfs(), self.has_nodemap())
367 374 }
368 375
369 376 pub fn manifestlog(&self) -> Result<Ref<Manifestlog>, HgError> {
370 377 self.manifestlog.get_or_init(|| self.new_manifestlog())
371 378 }
372 379
373 380 pub fn manifestlog_mut(&self) -> Result<RefMut<Manifestlog>, HgError> {
374 381 self.manifestlog.get_mut_or_init(|| self.new_manifestlog())
375 382 }
376 383
377 384 /// Returns the manifest of the *changeset* with the given node ID
378 385 pub fn manifest_for_node(
379 386 &self,
380 387 node: impl Into<NodePrefix>,
381 388 ) -> Result<Manifest, RevlogError> {
382 389 self.manifestlog()?.data_for_node(
383 390 self.changelog()?
384 391 .data_for_node(node.into())?
385 392 .manifest_node()?
386 393 .into(),
387 394 )
388 395 }
389 396
390 397 /// Returns the manifest of the *changeset* with the given revision number
391 398 pub fn manifest_for_rev(
392 399 &self,
393 400 revision: Revision,
394 401 ) -> Result<Manifest, RevlogError> {
395 402 self.manifestlog()?.data_for_node(
396 403 self.changelog()?
397 404 .data_for_rev(revision)?
398 405 .manifest_node()?
399 406 .into(),
400 407 )
401 408 }
402 409
403 410 pub fn has_subrepos(&self) -> Result<bool, DirstateError> {
404 411 if let Some(entry) = self.dirstate_map()?.get(HgPath::new(".hgsub"))? {
405 412 Ok(entry.tracked())
406 413 } else {
407 414 Ok(false)
408 415 }
409 416 }
410 417
411 418 pub fn filelog(&self, path: &HgPath) -> Result<Filelog, HgError> {
412 419 Filelog::open(self, path)
413 420 }
414 421
415 422 /// Write to disk any updates that were made through `dirstate_map_mut`.
416 423 ///
417 424 /// The "wlock" must be held while calling this.
418 425 /// See for example `try_with_wlock_no_wait`.
419 426 ///
420 427 /// TODO: have a `WritableRepo` type only accessible while holding the
421 428 /// lock?
422 429 pub fn write_dirstate(&self) -> Result<(), DirstateError> {
423 430 let map = self.dirstate_map()?;
424 431 // TODO: Maintain a `DirstateMap::dirty` flag, and return early here if
425 432 // it’s unset
426 433 let parents = self.dirstate_parents()?;
427 434 let (packed_dirstate, old_uuid_to_remove) = if self.has_dirstate_v2() {
428 435 let uuid_opt = self
429 436 .dirstate_data_file_uuid
430 437 .get_or_init(|| self.read_dirstate_data_file_uuid())?;
431 438 let uuid_opt = uuid_opt.as_ref();
432 439 let can_append = uuid_opt.is_some();
433 440 let (data, tree_metadata, append, old_data_size) =
434 441 map.pack_v2(can_append)?;
435 442
436 443 // Reuse the uuid, or generate a new one, keeping the old for
437 444 // deletion.
438 445 let (uuid, old_uuid) = match uuid_opt {
439 446 Some(uuid) => {
440 447 let as_str = std::str::from_utf8(uuid)
441 448 .map_err(|_| {
442 449 HgError::corrupted(
443 450 "non-UTF-8 dirstate data file ID",
444 451 )
445 452 })?
446 453 .to_owned();
447 454 if append {
448 455 (as_str, None)
449 456 } else {
450 457 (DirstateDocket::new_uid(), Some(as_str))
451 458 }
452 459 }
453 460 None => (DirstateDocket::new_uid(), None),
454 461 };
455 462
456 463 let data_filename = format!("dirstate.{}", uuid);
457 464 let data_filename = self.hg_vfs().join(data_filename);
458 465 let mut options = std::fs::OpenOptions::new();
459 466 options.write(true);
460 467
461 468 // Why are we not using the O_APPEND flag when appending?
462 469 //
463 470 // - O_APPEND makes it trickier to deal with garbage at the end of
464 471 // the file, left by a previous uncommitted transaction. By
465 472 // starting the write at [old_data_size] we make sure we erase
466 473 // all such garbage.
467 474 //
468 475 // - O_APPEND would require special-casing 0-byte writes, which we
469 476 // don't otherwise need to do.
470 477 //
471 478 // - Some OSes have bugs in their implementation of O_APPEND:
472 479 // revlog.py talks about a Solaris bug, but we also saw a ZFS
473 480 // bug: https://github.com/openzfs/zfs/pull/3124,
474 481 // https://github.com/openzfs/zfs/issues/13370
475 482 //
476 483 if !append {
477 484 options.create_new(true);
478 485 }
479 486
480 487 let data_size = (|| {
481 488 // TODO: loop and try another random ID if !append and this
482 489 // returns `ErrorKind::AlreadyExists`? Collision chance of two
483 490 // random IDs is one in 2**32
484 491 let mut file = options.open(&data_filename)?;
485 492 if append {
486 493 file.seek(SeekFrom::Start(old_data_size as u64))?;
487 494 }
488 495 file.write_all(&data)?;
489 496 file.flush()?;
490 497 file.seek(SeekFrom::Current(0))
491 498 })()
492 499 .when_writing_file(&data_filename)?;
493 500
494 501 let packed_dirstate = DirstateDocket::serialize(
495 502 parents,
496 503 tree_metadata,
497 504 data_size,
498 505 uuid.as_bytes(),
499 506 )
500 507 .map_err(|_: std::num::TryFromIntError| {
501 508 HgError::corrupted("overflow in dirstate docket serialization")
502 509 })?;
503 510
504 511 (packed_dirstate, old_uuid)
505 512 } else {
506 513 (map.pack_v1(parents)?, None)
507 514 };
508 515
509 516 let vfs = self.hg_vfs();
510 517 vfs.atomic_write("dirstate", &packed_dirstate)?;
511 518 if let Some(uuid) = old_uuid_to_remove {
512 519 // Remove the old data file after the new docket pointing to the
513 520 // new data file was written.
514 521 vfs.remove_file(format!("dirstate.{}", uuid))?;
515 522 }
516 523 Ok(())
517 524 }
518 525 }
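// Editorial sketch, not part of this change: how a caller is expected to
// drive `write_dirstate` above, holding the wlock for the whole update.
// The types (`Repo`, `DirstateError`, `LockError`) are the ones defined or
// imported in this file; the actual status-recording step is elided.
fn example_update_dirstate(repo: &Repo) -> Result<(), DirstateError> {
    let locked: Result<Result<(), DirstateError>, LockError> = repo
        .try_with_wlock_no_wait(|| {
            {
                let _map = repo.dirstate_map_mut()?;
                // ... record changes through `_map` here ...
            } // drop the mutable borrow before writing
            repo.write_dirstate()
        });
    match locked {
        Ok(write_result) => write_result,
        // Another process holds the wlock; a real command would report this.
        Err(_busy) => Ok(()),
    }
}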
519 526
520 527 /// Lazily-initialized component of `Repo` with interior mutability
521 528 ///
522 529 /// This differs from `OnceCell` in that the value can still be "deinitialized"
523 530 /// later by setting its inner `Option` to `None`. It also takes the
524 531 /// initialization function as an argument when the value is requested, not
525 532 /// when the instance is created.
526 533 struct LazyCell<T> {
527 534 value: RefCell<Option<T>>,
528 535 }
529 536
530 537 impl<T> LazyCell<T> {
531 538 fn new() -> Self {
532 539 Self {
533 540 value: RefCell::new(None),
534 541 }
535 542 }
536 543
537 544 fn set(&self, value: T) {
538 545 *self.value.borrow_mut() = Some(value)
539 546 }
540 547
541 548 fn get_or_init<E>(
542 549 &self,
543 550 init: impl Fn() -> Result<T, E>,
544 551 ) -> Result<Ref<T>, E> {
545 552 let mut borrowed = self.value.borrow();
546 553 if borrowed.is_none() {
547 554 drop(borrowed);
548 555 // Only use `borrow_mut` if it is really needed to avoid panic in
549 556 // case there is another outstanding borrow but mutation is not
550 557 // needed.
551 558 *self.value.borrow_mut() = Some(init()?);
552 559 borrowed = self.value.borrow()
553 560 }
554 561 Ok(Ref::map(borrowed, |option| option.as_ref().unwrap()))
555 562 }
556 563
557 564 fn get_mut_or_init<E>(
558 565 &self,
559 566 init: impl Fn() -> Result<T, E>,
560 567 ) -> Result<RefMut<T>, E> {
561 568 let mut borrowed = self.value.borrow_mut();
562 569 if borrowed.is_none() {
563 570 *borrowed = Some(init()?);
564 571 }
565 572 Ok(RefMut::map(borrowed, |option| option.as_mut().unwrap()))
566 573 }
567 574 }
rust/hg-core/src/vfs.rs
@@ -1,174 +1,195
1 1 use crate::errors::{HgError, IoErrorContext, IoResultExt};
2 2 use memmap2::{Mmap, MmapOptions};
3 3 use std::io::{ErrorKind, Write};
4 4 use std::path::{Path, PathBuf};
5 5
6 6 /// Filesystem access abstraction for the contents of a given "base" directory
7 7 #[derive(Clone, Copy)]
8 8 pub struct Vfs<'a> {
9 9 pub(crate) base: &'a Path,
10 10 }
11 11
12 12 struct FileNotFound(std::io::Error, PathBuf);
13 13
14 14 impl Vfs<'_> {
15 15 pub fn join(&self, relative_path: impl AsRef<Path>) -> PathBuf {
16 16 self.base.join(relative_path)
17 17 }
18 18
19 19 pub fn symlink_metadata(
20 20 &self,
21 21 relative_path: impl AsRef<Path>,
22 22 ) -> Result<std::fs::Metadata, HgError> {
23 23 let path = self.join(relative_path);
24 24 std::fs::symlink_metadata(&path).when_reading_file(&path)
25 25 }
26 26
27 27 pub fn read_link(
28 28 &self,
29 29 relative_path: impl AsRef<Path>,
30 30 ) -> Result<PathBuf, HgError> {
31 31 let path = self.join(relative_path);
32 32 std::fs::read_link(&path).when_reading_file(&path)
33 33 }
34 34
35 35 pub fn read(
36 36 &self,
37 37 relative_path: impl AsRef<Path>,
38 38 ) -> Result<Vec<u8>, HgError> {
39 39 let path = self.join(relative_path);
40 40 std::fs::read(&path).when_reading_file(&path)
41 41 }
42 42
43 43 /// Returns `Ok(None)` if the file does not exist.
44 44 pub fn try_read(
45 45 &self,
46 46 relative_path: impl AsRef<Path>,
47 47 ) -> Result<Option<Vec<u8>>, HgError> {
48 48 match self.read(relative_path) {
49 49 Err(e) => match &e {
50 50 HgError::IoError { error, .. } => match error.kind() {
51 51 ErrorKind::NotFound => return Ok(None),
52 52 _ => Err(e),
53 53 },
54 54 _ => Err(e),
55 55 },
56 56 Ok(v) => Ok(Some(v)),
57 57 }
58 58 }
59 59
60 60 fn mmap_open_gen(
61 61 &self,
62 62 relative_path: impl AsRef<Path>,
63 63 ) -> Result<Result<Mmap, FileNotFound>, HgError> {
64 64 let path = self.join(relative_path);
65 65 let file = match std::fs::File::open(&path) {
66 66 Err(err) => {
67 67 if let ErrorKind::NotFound = err.kind() {
68 68 return Ok(Err(FileNotFound(err, path)));
69 69 };
70 70 return (Err(err)).when_reading_file(&path);
71 71 }
72 72 Ok(file) => file,
73 73 };
74 74 // TODO: what are the safety requirements here?
75 75 let mmap = unsafe { MmapOptions::new().map(&file) }
76 76 .when_reading_file(&path)?;
77 77 Ok(Ok(mmap))
78 78 }
79 79
80 80 pub fn mmap_open_opt(
81 81 &self,
82 82 relative_path: impl AsRef<Path>,
83 83 ) -> Result<Option<Mmap>, HgError> {
84 84 self.mmap_open_gen(relative_path).map(|res| res.ok())
85 85 }
86 86
87 87 pub fn mmap_open(
88 88 &self,
89 89 relative_path: impl AsRef<Path>,
90 90 ) -> Result<Mmap, HgError> {
91 91 match self.mmap_open_gen(relative_path)? {
92 92 Err(FileNotFound(err, path)) => Err(err).when_reading_file(&path),
93 93 Ok(res) => Ok(res),
94 94 }
95 95 }
96 96
97 97 pub fn rename(
98 98 &self,
99 99 relative_from: impl AsRef<Path>,
100 100 relative_to: impl AsRef<Path>,
101 101 ) -> Result<(), HgError> {
102 102 let from = self.join(relative_from);
103 103 let to = self.join(relative_to);
104 104 std::fs::rename(&from, &to)
105 105 .with_context(|| IoErrorContext::RenamingFile { from, to })
106 106 }
107 107
108 108 pub fn remove_file(
109 109 &self,
110 110 relative_path: impl AsRef<Path>,
111 111 ) -> Result<(), HgError> {
112 112 let path = self.join(relative_path);
113 113 std::fs::remove_file(&path)
114 114 .with_context(|| IoErrorContext::RemovingFile(path))
115 115 }
116 116
117 117 #[cfg(unix)]
118 118 pub fn create_symlink(
119 119 &self,
120 120 relative_link_path: impl AsRef<Path>,
121 121 target_path: impl AsRef<Path>,
122 122 ) -> Result<(), HgError> {
123 123 let link_path = self.join(relative_link_path);
124 124 std::os::unix::fs::symlink(target_path, &link_path)
125 125 .when_writing_file(&link_path)
126 126 }
127 127
128 128 /// Write `contents` into a temporary file, then rename to `relative_path`.
129 129 /// This makes writing to a file "atomic": a reader opening that path will
130 130 /// see either the previous contents of the file or the complete new
131 131 /// content, never a partial write.
132 132 pub fn atomic_write(
133 133 &self,
134 134 relative_path: impl AsRef<Path>,
135 135 contents: &[u8],
136 136 ) -> Result<(), HgError> {
137 137 let mut tmp = tempfile::NamedTempFile::new_in(self.base)
138 138 .when_writing_file(self.base)?;
139 139 tmp.write_all(contents)
140 140 .and_then(|()| tmp.flush())
141 141 .when_writing_file(tmp.path())?;
142 142 let path = self.join(relative_path);
143 143 tmp.persist(&path)
144 144 .map_err(|e| e.error)
145 145 .when_writing_file(&path)?;
146 146 Ok(())
147 147 }
148 148 }
149 149
150 150 fn fs_metadata(
151 151 path: impl AsRef<Path>,
152 152 ) -> Result<Option<std::fs::Metadata>, HgError> {
153 153 let path = path.as_ref();
154 154 match std::fs::metadata(path) {
155 155 Ok(meta) => Ok(Some(meta)),
156 156 Err(error) => match error.kind() {
157 157 // TODO: when we require a Rust version where `NotADirectory` is
158 158 // stable, invert this logic and return None for it and `NotFound`
159 159 // and propagate any other error.
160 160 ErrorKind::PermissionDenied => Err(error).with_context(|| {
161 161 IoErrorContext::ReadingMetadata(path.to_owned())
162 162 }),
163 163 _ => Ok(None),
164 164 },
165 165 }
166 166 }
167 167
168 168 pub(crate) fn is_dir(path: impl AsRef<Path>) -> Result<bool, HgError> {
169 169 Ok(fs_metadata(path)?.map_or(false, |meta| meta.is_dir()))
170 170 }
171 171
172 172 pub(crate) fn is_file(path: impl AsRef<Path>) -> Result<bool, HgError> {
173 173 Ok(fs_metadata(path)?.map_or(false, |meta| meta.is_file()))
174 174 }
175
176 /// Returns whether the given `path` is on an NFS mount.
177 /// Taken from `cargo`'s codebase.
178 #[cfg(target_os = "linux")]
179 pub(crate) fn is_on_nfs_mount(path: impl AsRef<Path>) -> bool {
180 use std::ffi::CString;
181 use std::mem;
182 use std::os::unix::prelude::*;
183
184 let path = match CString::new(path.as_ref().as_os_str().as_bytes()) {
185 Ok(path) => path,
186 Err(_) => return false,
187 };
188
189 unsafe {
190 let mut buf: libc::statfs = mem::zeroed();
191 let r = libc::statfs(path.as_ptr(), &mut buf);
192
193 r == 0 && buf.f_type as u32 == libc::NFS_SUPER_MAGIC as u32
194 }
195 }