##// END OF EJS Templates
rust-repo: extract a function for checking nodemap requirement...
Martin von Zweigbergk -
r49982:9b5334c1 default
parent child Browse files
Show More
@@ -1,523 +1,522 b''
1 1 use crate::changelog::Changelog;
2 2 use crate::config::{Config, ConfigError, ConfigParseError};
3 3 use crate::dirstate::DirstateParents;
4 4 use crate::dirstate_tree::on_disk::Docket as DirstateDocket;
5 5 use crate::dirstate_tree::owning::OwningDirstateMap;
6 6 use crate::errors::HgResultExt;
7 7 use crate::errors::{HgError, IoResultExt};
8 8 use crate::lock::{try_with_lock_no_wait, LockError};
9 9 use crate::manifest::{Manifest, Manifestlog};
10 10 use crate::revlog::filelog::Filelog;
11 11 use crate::revlog::revlog::RevlogError;
12 12 use crate::utils::files::get_path_from_bytes;
13 13 use crate::utils::hg_path::HgPath;
14 14 use crate::utils::SliceExt;
15 15 use crate::vfs::{is_dir, is_file, Vfs};
16 16 use crate::{requirements, NodePrefix};
17 17 use crate::{DirstateError, Revision};
18 18 use std::cell::{Ref, RefCell, RefMut};
19 19 use std::collections::HashSet;
20 20 use std::io::Seek;
21 21 use std::io::SeekFrom;
22 22 use std::io::Write as IoWrite;
23 23 use std::path::{Path, PathBuf};
24 24
/// A repository on disk
pub struct Repo {
    /// Root of the working directory (the parent of `.hg`).
    working_directory: PathBuf,
    /// Path to the `.hg` directory itself.
    dot_hg: PathBuf,
    /// Path to the store: `.hg/store`, or the share source's store when
    /// this repository is shared.
    store: PathBuf,
    /// Contents of `.hg/requires` (extended with the store's requirements
    /// when the repository is share-safe).
    requirements: HashSet<String>,
    /// Combined configuration (system/user config plus per-repo `hgrc`s).
    config: Config,
    /// Lazily-read dirstate parents.
    dirstate_parents: LazyCell<DirstateParents, HgError>,
    /// dirstate-v2 only: data file ID from the docket; `None` when the
    /// dirstate is empty.
    dirstate_data_file_uuid: LazyCell<Option<Vec<u8>>, HgError>,
    /// Lazily-loaded dirstate map.
    dirstate_map: LazyCell<OwningDirstateMap, DirstateError>,
    /// Lazily-opened changelog.
    changelog: LazyCell<Changelog, HgError>,
    /// Lazily-opened manifest log.
    manifestlog: LazyCell<Manifestlog, HgError>,
}
38 38
/// Errors that can occur while locating or opening a repository.
#[derive(Debug, derive_more::From)]
pub enum RepoError {
    /// No repository was found at (or above) this path.
    NotFound {
        at: PathBuf,
    },
    /// A configuration file could not be parsed.
    #[from]
    ConfigParseError(ConfigParseError),
    /// Any other error.
    #[from]
    Other(HgError),
}
49 49
50 50 impl From<ConfigError> for RepoError {
51 51 fn from(error: ConfigError) -> Self {
52 52 match error {
53 53 ConfigError::Parse(error) => error.into(),
54 54 ConfigError::Other(error) => error.into(),
55 55 }
56 56 }
57 57 }
58 58
59 59 impl Repo {
60 60 /// tries to find nearest repository root in current working directory or
61 61 /// its ancestors
62 62 pub fn find_repo_root() -> Result<PathBuf, RepoError> {
63 63 let current_directory = crate::utils::current_dir()?;
64 64 // ancestors() is inclusive: it first yields `current_directory`
65 65 // as-is.
66 66 for ancestor in current_directory.ancestors() {
67 67 if is_dir(ancestor.join(".hg"))? {
68 68 return Ok(ancestor.to_path_buf());
69 69 }
70 70 }
71 71 return Err(RepoError::NotFound {
72 72 at: current_directory,
73 73 });
74 74 }
75 75
76 76 /// Find a repository, either at the given path (which must contain a `.hg`
77 77 /// sub-directory) or by searching the current directory and its
78 78 /// ancestors.
79 79 ///
80 80 /// A method with two very different "modes" like this usually a code smell
81 81 /// to make two methods instead, but in this case an `Option` is what rhg
82 82 /// sub-commands get from Clap for the `-R` / `--repository` CLI argument.
83 83 /// Having two methods would just move that `if` to almost all callers.
84 84 pub fn find(
85 85 config: &Config,
86 86 explicit_path: Option<PathBuf>,
87 87 ) -> Result<Self, RepoError> {
88 88 if let Some(root) = explicit_path {
89 89 if is_dir(root.join(".hg"))? {
90 90 Self::new_at_path(root.to_owned(), config)
91 91 } else if is_file(&root)? {
92 92 Err(HgError::unsupported("bundle repository").into())
93 93 } else {
94 94 Err(RepoError::NotFound {
95 95 at: root.to_owned(),
96 96 })
97 97 }
98 98 } else {
99 99 let root = Self::find_repo_root()?;
100 100 Self::new_at_path(root, config)
101 101 }
102 102 }
103 103
    /// To be called after checking that `.hg` is a sub-directory
    fn new_at_path(
        working_directory: PathBuf,
        config: &Config,
    ) -> Result<Self, RepoError> {
        let dot_hg = working_directory.join(".hg");

        // Per-repository config files; later entries take precedence, and
        // a share source's `hgrc` may be inserted at the front below.
        let mut repo_config_files = Vec::new();
        repo_config_files.push(dot_hg.join("hgrc"));
        repo_config_files.push(dot_hg.join("hgrc-not-shared"));

        let hg_vfs = Vfs { base: &dot_hg };
        let mut reqs = requirements::load_if_exists(hg_vfs)?;
        let relative =
            reqs.contains(requirements::RELATIVE_SHARED_REQUIREMENT);
        let shared =
            reqs.contains(requirements::SHARED_REQUIREMENT) || relative;

        // From `mercurial/localrepo.py`:
        //
        // if .hg/requires contains the sharesafe requirement, it means
        // there exists a `.hg/store/requires` too and we should read it
        // NOTE: presence of SHARESAFE_REQUIREMENT imply that store requirement
        // is present. We never write SHARESAFE_REQUIREMENT for a repo if store
        // is not present, refer checkrequirementscompat() for that
        //
        // However, if SHARESAFE_REQUIREMENT is not present, it means that the
        // repository was shared the old way. We check the share source
        // .hg/requires for SHARESAFE_REQUIREMENT to detect whether the
        // current repository needs to be reshared
        let share_safe = reqs.contains(requirements::SHARESAFE_REQUIREMENT);

        let store_path;
        if !shared {
            store_path = dot_hg.join("store");
        } else {
            // Shared repository: resolve the share source from
            // `.hg/sharedpath` (stripping trailing newlines).
            let bytes = hg_vfs.read("sharedpath")?;
            let mut shared_path =
                get_path_from_bytes(bytes.trim_end_matches(|b| b == b'\n'))
                    .to_owned();
            if relative {
                // A relative shared path is resolved against this `.hg`.
                shared_path = dot_hg.join(shared_path)
            }
            if !is_dir(&shared_path)? {
                return Err(HgError::corrupted(format!(
                    ".hg/sharedpath points to nonexistent directory {}",
                    shared_path.display()
                ))
                .into());
            }

            store_path = shared_path.join("store");

            let source_is_share_safe =
                requirements::load(Vfs { base: &shared_path })?
                    .contains(requirements::SHARESAFE_REQUIREMENT);

            // This repository and its share source must agree on the
            // share-safe requirement.
            if share_safe != source_is_share_safe {
                return Err(HgError::unsupported("share-safe mismatch").into());
            }

            if share_safe {
                // The share source's `hgrc` has the lowest precedence.
                repo_config_files.insert(0, shared_path.join("hgrc"))
            }
        }
        if share_safe {
            // Share-safe repositories keep additional requirements in the
            // store's own `requires` file.
            reqs.extend(requirements::load(Vfs { base: &store_path })?);
        }

        // `HGRCSKIPREPO` disables reading any per-repository configuration.
        let repo_config = if std::env::var_os("HGRCSKIPREPO").is_none() {
            config.combine_with_repo(&repo_config_files)?
        } else {
            config.clone()
        };

        let repo = Self {
            requirements: reqs,
            working_directory,
            store: store_path,
            dot_hg,
            config: repo_config,
            dirstate_parents: LazyCell::new(Self::read_dirstate_parents),
            dirstate_data_file_uuid: LazyCell::new(
                Self::read_dirstate_data_file_uuid,
            ),
            dirstate_map: LazyCell::new(Self::new_dirstate_map),
            changelog: LazyCell::new(Self::new_changelog),
            manifestlog: LazyCell::new(Self::new_manifestlog),
        };

        // Reject repositories whose requirements we do not support.
        requirements::check(&repo)?;

        Ok(repo)
    }
198 198
    /// Root of the working directory.
    pub fn working_directory_path(&self) -> &Path {
        &self.working_directory
    }

    /// The repository's requirements, as loaded from `.hg/requires`
    /// (and the store's `requires` for share-safe repositories).
    pub fn requirements(&self) -> &HashSet<String> {
        &self.requirements
    }

    /// The combined configuration for this repository.
    pub fn config(&self) -> &Config {
        &self.config
    }

    /// For accessing repository files (in `.hg`), except for the store
    /// (`.hg/store`).
    pub fn hg_vfs(&self) -> Vfs<'_> {
        Vfs { base: &self.dot_hg }
    }

    /// For accessing repository store files (in `.hg/store`)
    pub fn store_vfs(&self) -> Vfs<'_> {
        Vfs { base: &self.store }
    }

    /// For accessing the working copy
    pub fn working_directory_vfs(&self) -> Vfs<'_> {
        Vfs {
            base: &self.working_directory,
        }
    }

    /// Run `f` while holding the working-copy lock ("wlock"), failing
    /// instead of waiting if the lock is already held.
    pub fn try_with_wlock_no_wait<R>(
        &self,
        f: impl FnOnce() -> R,
    ) -> Result<R, LockError> {
        try_with_lock_no_wait(self.hg_vfs(), "wlock", f)
    }

    /// Whether the dirstate uses the v2 on-disk format.
    pub fn has_dirstate_v2(&self) -> bool {
        self.requirements
            .contains(requirements::DIRSTATE_V2_REQUIREMENT)
    }

    /// Whether the sparse requirement is set.
    pub fn has_sparse(&self) -> bool {
        self.requirements.contains(requirements::SPARSE_REQUIREMENT)
    }

    /// Whether the narrow requirement is set.
    pub fn has_narrow(&self) -> bool {
        self.requirements.contains(requirements::NARROW_REQUIREMENT)
    }

    /// Whether the persistent nodemap requirement is set; checked when
    /// opening the changelog and manifest log.
    pub fn has_nodemap(&self) -> bool {
        self.requirements
            .contains(requirements::NODEMAP_REQUIREMENT)
    }
253
249 254 fn dirstate_file_contents(&self) -> Result<Vec<u8>, HgError> {
250 255 Ok(self
251 256 .hg_vfs()
252 257 .read("dirstate")
253 258 .io_not_found_as_none()?
254 259 .unwrap_or(Vec::new()))
255 260 }
256 261
    /// The working directory's parents, read from disk and cached on first
    /// access.
    pub fn dirstate_parents(&self) -> Result<DirstateParents, HgError> {
        Ok(*self.dirstate_parents.get_or_init(self)?)
    }

    /// Read the dirstate parents from disk, opportunistically caching the
    /// dirstate-v2 data file ID when the docket is read along the way.
    fn read_dirstate_parents(&self) -> Result<DirstateParents, HgError> {
        let dirstate = self.dirstate_file_contents()?;
        let parents = if dirstate.is_empty() {
            if self.has_dirstate_v2() {
                // Empty dirstate: there is no data file either.
                self.dirstate_data_file_uuid.set(None);
            }
            DirstateParents::NULL
        } else if self.has_dirstate_v2() {
            // v2: parents live in the docket, which also names the data
            // file.
            let docket =
                crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
            self.dirstate_data_file_uuid
                .set(Some(docket.uuid.to_owned()));
            docket.parents()
        } else {
            // v1: parents are at the start of the dirstate file itself.
            crate::dirstate::parsers::parse_dirstate_parents(&dirstate)?
                .clone()
        };
        // Cache the result so `dirstate_parents()` does not re-read.
        self.dirstate_parents.set(parents);
        Ok(parents)
    }
281 286
282 287 fn read_dirstate_data_file_uuid(
283 288 &self,
284 289 ) -> Result<Option<Vec<u8>>, HgError> {
285 290 assert!(
286 291 self.has_dirstate_v2(),
287 292 "accessing dirstate data file ID without dirstate-v2"
288 293 );
289 294 let dirstate = self.dirstate_file_contents()?;
290 295 if dirstate.is_empty() {
291 296 self.dirstate_parents.set(DirstateParents::NULL);
292 297 Ok(None)
293 298 } else {
294 299 let docket =
295 300 crate::dirstate_tree::on_disk::read_docket(&dirstate)?;
296 301 self.dirstate_parents.set(docket.parents());
297 302 Ok(Some(docket.uuid.to_owned()))
298 303 }
299 304 }
300 305
    /// Load the dirstate map from disk, caching the parents (and, for
    /// dirstate-v2, the data file ID) discovered along the way.
    fn new_dirstate_map(&self) -> Result<OwningDirstateMap, DirstateError> {
        let dirstate_file_contents = self.dirstate_file_contents()?;
        if dirstate_file_contents.is_empty() {
            // Empty dirstate: null parents, no data file, empty map.
            self.dirstate_parents.set(DirstateParents::NULL);
            if self.has_dirstate_v2() {
                self.dirstate_data_file_uuid.set(None);
            }
            Ok(OwningDirstateMap::new_empty(Vec::new()))
        } else if self.has_dirstate_v2() {
            let docket = crate::dirstate_tree::on_disk::read_docket(
                &dirstate_file_contents,
            )?;
            self.dirstate_parents.set(docket.parents());
            self.dirstate_data_file_uuid
                .set(Some(docket.uuid.to_owned()));
            let data_size = docket.data_size();
            let metadata = docket.tree_metadata();
            // A missing data file is treated the same as an empty one.
            if let Some(data_mmap) = self
                .hg_vfs()
                .mmap_open(docket.data_filename())
                .io_not_found_as_none()?
            {
                OwningDirstateMap::new_v2(data_mmap, data_size, metadata)
            } else {
                OwningDirstateMap::new_v2(Vec::new(), data_size, metadata)
            }
        } else {
            // v1: parsing the map also yields the parents.
            let (map, parents) =
                OwningDirstateMap::new_v1(dirstate_file_contents)?;
            self.dirstate_parents.set(parents);
            Ok(map)
        }
    }
334 339
    /// Shared reference to the lazily-loaded dirstate map.
    pub fn dirstate_map(
        &self,
    ) -> Result<Ref<OwningDirstateMap>, DirstateError> {
        self.dirstate_map.get_or_init(self)
    }

    /// Mutable reference to the lazily-loaded dirstate map.
    pub fn dirstate_map_mut(
        &self,
    ) -> Result<RefMut<OwningDirstateMap>, DirstateError> {
        self.dirstate_map.get_mut_or_init(self)
    }
346 351
347 352 fn new_changelog(&self) -> Result<Changelog, HgError> {
348 let use_nodemap = self
349 .requirements
350 .contains(requirements::NODEMAP_REQUIREMENT);
351 Changelog::open(&self.store_vfs(), use_nodemap)
353 Changelog::open(&self.store_vfs(), self.has_nodemap())
352 354 }
353 355
    /// Shared reference to the lazily-opened changelog.
    pub fn changelog(&self) -> Result<Ref<Changelog>, HgError> {
        self.changelog.get_or_init(self)
    }

    /// Mutable reference to the lazily-opened changelog.
    pub fn changelog_mut(&self) -> Result<RefMut<Changelog>, HgError> {
        self.changelog.get_mut_or_init(self)
    }
361 363
362 364 fn new_manifestlog(&self) -> Result<Manifestlog, HgError> {
363 let use_nodemap = self
364 .requirements
365 .contains(requirements::NODEMAP_REQUIREMENT);
366 Manifestlog::open(&self.store_vfs(), use_nodemap)
365 Manifestlog::open(&self.store_vfs(), self.has_nodemap())
367 366 }
368 367
    /// Shared reference to the lazily-opened manifest log.
    pub fn manifestlog(&self) -> Result<Ref<Manifestlog>, HgError> {
        self.manifestlog.get_or_init(self)
    }

    /// Mutable reference to the lazily-opened manifest log.
    pub fn manifestlog_mut(&self) -> Result<RefMut<Manifestlog>, HgError> {
        self.manifestlog.get_mut_or_init(self)
    }
376 375
377 376 /// Returns the manifest of the *changeset* with the given node ID
378 377 pub fn manifest_for_node(
379 378 &self,
380 379 node: impl Into<NodePrefix>,
381 380 ) -> Result<Manifest, RevlogError> {
382 381 self.manifestlog()?.data_for_node(
383 382 self.changelog()?
384 383 .data_for_node(node.into())?
385 384 .manifest_node()?
386 385 .into(),
387 386 )
388 387 }
389 388
390 389 /// Returns the manifest of the *changeset* with the given revision number
391 390 pub fn manifest_for_rev(
392 391 &self,
393 392 revision: Revision,
394 393 ) -> Result<Manifest, RevlogError> {
395 394 self.manifestlog()?.data_for_node(
396 395 self.changelog()?
397 396 .data_for_rev(revision)?
398 397 .manifest_node()?
399 398 .into(),
400 399 )
401 400 }
402 401
403 402 pub fn has_subrepos(&self) -> Result<bool, DirstateError> {
404 403 if let Some(entry) = self.dirstate_map()?.get(HgPath::new(".hgsub"))? {
405 404 Ok(entry.state().is_tracked())
406 405 } else {
407 406 Ok(false)
408 407 }
409 408 }
410 409
    /// Open the filelog (per-file revision log) for the file at `path`.
    pub fn filelog(&self, path: &HgPath) -> Result<Filelog, HgError> {
        Filelog::open(self, path)
    }
414 413
    /// Write to disk any updates that were made through `dirstate_map_mut`.
    ///
    /// The "wlock" must be held while calling this.
    /// See for example `try_with_wlock_no_wait`.
    ///
    /// TODO: have a `WritableRepo` type only accessible while holding the
    /// lock?
    pub fn write_dirstate(&self) -> Result<(), DirstateError> {
        let map = self.dirstate_map()?;
        // TODO: Maintain a `DirstateMap::dirty` flag, and return early here if
        // it’s unset
        let parents = self.dirstate_parents()?;
        let packed_dirstate = if self.has_dirstate_v2() {
            let uuid = self.dirstate_data_file_uuid.get_or_init(self)?;
            let mut uuid = uuid.as_ref();
            // We can only append when there is an existing data file (i.e.
            // we know its ID); `pack_v2` decides whether appending is
            // actually done.
            let can_append = uuid.is_some();
            let (data, tree_metadata, append) = map.pack_v2(can_append)?;
            if !append {
                // Not appending: a fresh data file (and ID) will be used.
                uuid = None
            }
            let uuid = if let Some(uuid) = uuid {
                std::str::from_utf8(uuid)
                    .map_err(|_| {
                        HgError::corrupted("non-UTF-8 dirstate data file ID")
                    })?
                    .to_owned()
            } else {
                DirstateDocket::new_uid()
            };
            let data_filename = format!("dirstate.{}", uuid);
            let data_filename = self.hg_vfs().join(data_filename);
            let mut options = std::fs::OpenOptions::new();
            if append {
                options.append(true);
            } else {
                // A brand-new data file must not clobber an existing one.
                options.write(true).create_new(true);
            }
            // The closure groups the fallible I/O so a single
            // `when_writing_file` adds context to any error; its result is
            // the final file offset, i.e. the total data size.
            let data_size = (|| {
                // TODO: loop and try another random ID if !append and this
                // returns `ErrorKind::AlreadyExists`? Collision chance of two
                // random IDs is one in 2**32
                let mut file = options.open(&data_filename)?;
                file.write_all(&data)?;
                file.flush()?;
                // TODO: use https://doc.rust-lang.org/std/io/trait.Seek.html#method.stream_position when we require Rust 1.51+
                file.seek(SeekFrom::Current(0))
            })()
            .when_writing_file(&data_filename)?;
            DirstateDocket::serialize(
                parents,
                tree_metadata,
                data_size,
                uuid.as_bytes(),
            )
            .map_err(|_: std::num::TryFromIntError| {
                HgError::corrupted("overflow in dirstate docket serialization")
            })?
        } else {
            map.pack_v1(parents)?
        };
        // The docket (v2) or the whole dirstate (v1) is replaced atomically.
        self.hg_vfs().atomic_write("dirstate", &packed_dirstate)?;
        Ok(())
    }
478 477 }
479 478
/// Lazily-initialized component of `Repo` with interior mutability
///
/// This differs from `OnceCell` in that the value can still be "deinitialized"
/// later by setting its inner `Option` to `None`.
struct LazyCell<T, E> {
    /// `None` until first initialization (or after deinitialization).
    value: RefCell<Option<T>>,
    // `Fn`s that don’t capture environment are zero-size, so this box does
    // not allocate:
    init: Box<dyn Fn(&Repo) -> Result<T, E>>,
}
490 489
impl<T, E> LazyCell<T, E> {
    /// Create an empty cell that will run `init` on first access.
    fn new(init: impl Fn(&Repo) -> Result<T, E> + 'static) -> Self {
        Self {
            value: RefCell::new(None),
            init: Box::new(init),
        }
    }

    /// Overwrite (or pre-populate) the cached value.
    fn set(&self, value: T) {
        *self.value.borrow_mut() = Some(value)
    }

    /// Shared borrow of the value, initializing it first if needed.
    fn get_or_init(&self, repo: &Repo) -> Result<Ref<T>, E> {
        let mut borrowed = self.value.borrow();
        if borrowed.is_none() {
            // Must drop the shared borrow before taking the mutable one.
            drop(borrowed);
            // Only use `borrow_mut` if it is really needed to avoid panic in
            // case there is another outstanding borrow but mutation is not
            // needed.
            *self.value.borrow_mut() = Some((self.init)(repo)?);
            borrowed = self.value.borrow()
        }
        Ok(Ref::map(borrowed, |option| option.as_ref().unwrap()))
    }

    /// Mutable borrow of the value, initializing it first if needed.
    fn get_mut_or_init(&self, repo: &Repo) -> Result<RefMut<T>, E> {
        let mut borrowed = self.value.borrow_mut();
        if borrowed.is_none() {
            *borrowed = Some((self.init)(repo)?);
        }
        Ok(RefMut::map(borrowed, |option| option.as_mut().unwrap()))
    }
}
General Comments 0
You need to be logged in to leave comments. Login now