##// END OF EJS Templates
copies-rust: rename Oracle.is_ancestor to Oracle.is_overwrite...
marmoute -
r46769:ecbb2fc9 default
parent child Browse files
Show More
@@ -1,758 +1,758
1 1 use crate::utils::hg_path::HgPath;
2 2 use crate::utils::hg_path::HgPathBuf;
3 3 use crate::Revision;
4 4 use crate::NULL_REVISION;
5 5
6 6 use im_rc::ordmap::DiffItem;
7 7 use im_rc::ordmap::Entry;
8 8 use im_rc::ordmap::OrdMap;
9 9
10 10 use std::cmp::Ordering;
11 11 use std::collections::HashMap;
12 12 use std::convert::TryInto;
13 13
/// Final mapping of copy destination path to copy source path.
pub type PathCopies = HashMap<HgPathBuf, HgPathBuf>;

/// Lightweight integer handle standing in for an `HgPath` (see
/// `TwoWayPathMap`); comparing and hashing small integers is much cheaper
/// than full paths.
type PathToken = usize;

#[derive(Clone, Debug, PartialEq, Copy)]
struct TimeStampedPathCopy {
    /// revision at which the copy information was added
    rev: Revision,
    /// the copy source, (Set to None in case of deletion of the associated
    /// key)
    path: Option<PathToken>,
}

/// maps CopyDestination to Copy Source (+ a "timestamp" for the operation)
type TimeStampedPathCopies = OrdMap<PathToken, TimeStampedPathCopy>;

/// hold parent 1, parent 2 and relevant files actions.
pub type RevInfo<'a> = (Revision, Revision, ChangedFiles<'a>);
32 32
/// represent the files affected by a changesets
///
/// This hold a subset of mercurial.metadata.ChangingFiles as we do not need
/// all the data categories tracked by it.
pub struct ChangedFiles<'a> {
    /// number of entries recorded in `index`
    nb_items: u32,
    /// fixed-size index entries (`ENTRY_SIZE` bytes each): one flags byte
    /// plus two big-endian u32 offsets (filename end, copy-source index)
    index: &'a [u8],
    /// concatenated filenames, delimited by the offsets stored in `index`
    data: &'a [u8],
}
44 44
/// Represent active changes that affect the copy tracing.
enum Action<'a> {
    /// The parent → children edge is removing a file
    ///
    /// (actually, this could be the edge from the other parent, but it does
    /// not matter)
    Removed(&'a HgPath),
    /// The parent → children edge introduce copy information between (dest,
    /// source)
    Copied(&'a HgPath, &'a HgPath),
}

/// This express the possible "special" case we can get in a merge
///
/// See mercurial/metadata.py for details on these values.
#[derive(PartialEq)]
enum MergeCase {
    /// Merged: file had history on both side that needed to be merged
    Merged,
    /// Salvaged: file was candidate for deletion, but survived the merge
    Salvaged,
    /// Normal: Not one of the two cases above
    Normal,
}
69 69
/// Raw entry decoded from a `ChangedFiles` index:
/// (flags byte, filename, copy source).
type FileChange<'a> = (u8, &'a HgPath, &'a HgPath);

const EMPTY: &[u8] = b"";
// Masks and values applied to an entry's flags byte. These must stay in sync
// with the encoding performed on the Python side (mercurial/metadata.py):
// the low bits carry copy information, the next bits the action kind.
const COPY_MASK: u8 = 3;
const P1_COPY: u8 = 2;
const P2_COPY: u8 = 3;
const ACTION_MASK: u8 = 28;
const REMOVED: u8 = 12;
const MERGED: u8 = 8;
const SALVAGED: u8 = 16;
81 81 impl<'a> ChangedFiles<'a> {
82 82 const INDEX_START: usize = 4;
83 83 const ENTRY_SIZE: u32 = 9;
84 84 const FILENAME_START: u32 = 1;
85 85 const COPY_SOURCE_START: u32 = 5;
86 86
87 87 pub fn new(data: &'a [u8]) -> Self {
88 88 assert!(
89 89 data.len() >= 4,
90 90 "data size ({}) is too small to contain the header (4)",
91 91 data.len()
92 92 );
93 93 let nb_items_raw: [u8; 4] = (&data[0..=3])
94 94 .try_into()
95 95 .expect("failed to turn 4 bytes into 4 bytes");
96 96 let nb_items = u32::from_be_bytes(nb_items_raw);
97 97
98 98 let index_size = (nb_items * Self::ENTRY_SIZE) as usize;
99 99 let index_end = Self::INDEX_START + index_size;
100 100
101 101 assert!(
102 102 data.len() >= index_end,
103 103 "data size ({}) is too small to fit the index_data ({})",
104 104 data.len(),
105 105 index_end
106 106 );
107 107
108 108 let ret = ChangedFiles {
109 109 nb_items,
110 110 index: &data[Self::INDEX_START..index_end],
111 111 data: &data[index_end..],
112 112 };
113 113 let max_data = ret.filename_end(nb_items - 1) as usize;
114 114 assert!(
115 115 ret.data.len() >= max_data,
116 116 "data size ({}) is too small to fit all data ({})",
117 117 data.len(),
118 118 index_end + max_data
119 119 );
120 120 ret
121 121 }
122 122
123 123 pub fn new_empty() -> Self {
124 124 ChangedFiles {
125 125 nb_items: 0,
126 126 index: EMPTY,
127 127 data: EMPTY,
128 128 }
129 129 }
130 130
131 131 /// internal function to return an individual entry at a given index
132 132 fn entry(&'a self, idx: u32) -> FileChange<'a> {
133 133 if idx >= self.nb_items {
134 134 panic!(
135 135 "index for entry is higher that the number of file {} >= {}",
136 136 idx, self.nb_items
137 137 )
138 138 }
139 139 let flags = self.flags(idx);
140 140 let filename = self.filename(idx);
141 141 let copy_idx = self.copy_idx(idx);
142 142 let copy_source = self.filename(copy_idx);
143 143 (flags, filename, copy_source)
144 144 }
145 145
146 146 /// internal function to return the filename of the entry at a given index
147 147 fn filename(&self, idx: u32) -> &HgPath {
148 148 let filename_start;
149 149 if idx == 0 {
150 150 filename_start = 0;
151 151 } else {
152 152 filename_start = self.filename_end(idx - 1)
153 153 }
154 154 let filename_end = self.filename_end(idx);
155 155 let filename_start = filename_start as usize;
156 156 let filename_end = filename_end as usize;
157 157 HgPath::new(&self.data[filename_start..filename_end])
158 158 }
159 159
160 160 /// internal function to return the flag field of the entry at a given
161 161 /// index
162 162 fn flags(&self, idx: u32) -> u8 {
163 163 let idx = idx as usize;
164 164 self.index[idx * (Self::ENTRY_SIZE as usize)]
165 165 }
166 166
167 167 /// internal function to return the end of a filename part at a given index
168 168 fn filename_end(&self, idx: u32) -> u32 {
169 169 let start = (idx * Self::ENTRY_SIZE) + Self::FILENAME_START;
170 170 let end = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
171 171 let start = start as usize;
172 172 let end = end as usize;
173 173 let raw = (&self.index[start..end])
174 174 .try_into()
175 175 .expect("failed to turn 4 bytes into 4 bytes");
176 176 u32::from_be_bytes(raw)
177 177 }
178 178
179 179 /// internal function to return index of the copy source of the entry at a
180 180 /// given index
181 181 fn copy_idx(&self, idx: u32) -> u32 {
182 182 let start = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
183 183 let end = (idx + 1) * Self::ENTRY_SIZE;
184 184 let start = start as usize;
185 185 let end = end as usize;
186 186 let raw = (&self.index[start..end])
187 187 .try_into()
188 188 .expect("failed to turn 4 bytes into 4 bytes");
189 189 u32::from_be_bytes(raw)
190 190 }
191 191
192 192 /// Return an iterator over all the `Action` in this instance.
193 193 fn iter_actions(&self, parent: Parent) -> ActionsIterator {
194 194 ActionsIterator {
195 195 changes: &self,
196 196 parent: parent,
197 197 current: 0,
198 198 }
199 199 }
200 200
201 201 /// return the MergeCase value associated with a filename
202 202 fn get_merge_case(&self, path: &HgPath) -> MergeCase {
203 203 if self.nb_items == 0 {
204 204 return MergeCase::Normal;
205 205 }
206 206 let mut low_part = 0;
207 207 let mut high_part = self.nb_items;
208 208
209 209 while low_part < high_part {
210 210 let cursor = (low_part + high_part - 1) / 2;
211 211 let (flags, filename, _source) = self.entry(cursor);
212 212 match path.cmp(filename) {
213 213 Ordering::Less => low_part = cursor + 1,
214 214 Ordering::Greater => high_part = cursor,
215 215 Ordering::Equal => {
216 216 return match flags & ACTION_MASK {
217 217 MERGED => MergeCase::Merged,
218 218 SALVAGED => MergeCase::Salvaged,
219 219 _ => MergeCase::Normal,
220 220 };
221 221 }
222 222 }
223 223 }
224 224 MergeCase::Normal
225 225 }
226 226 }
227 227
/// A struct responsible for answering "is X ancestors of Y" quickly
///
/// The structure will delegate ancestors call to a callback, and cache the
/// result.
#[derive(Debug)]
struct AncestorOracle<'a, A: Fn(Revision, Revision) -> bool> {
    /// the (possibly expensive) is-ancestor callback being memoized
    inner: &'a A,
    /// cache of previously computed (ancestor, descendant) answers
    pairs: HashMap<(Revision, Revision), bool>,
}
237 237
238 238 impl<'a, A: Fn(Revision, Revision) -> bool> AncestorOracle<'a, A> {
239 239 fn new(func: &'a A) -> Self {
240 240 Self {
241 241 inner: func,
242 242 pairs: HashMap::default(),
243 243 }
244 244 }
245 245
246 246 /// returns `true` if `anc` is an ancestors of `desc`, `false` otherwise
247 fn is_ancestor(&mut self, anc: Revision, desc: Revision) -> bool {
247 fn is_overwrite(&mut self, anc: Revision, desc: Revision) -> bool {
248 248 if anc > desc {
249 249 false
250 250 } else if anc == desc {
251 251 true
252 252 } else {
253 253 if let Some(b) = self.pairs.get(&(anc, desc)) {
254 254 *b
255 255 } else {
256 256 let b = (self.inner)(anc, desc);
257 257 self.pairs.insert((anc, desc), b);
258 258 b
259 259 }
260 260 }
261 261 }
262 262 }
263 263
/// Iterator yielding the `Action`s of a `ChangedFiles` that are relevant to
/// one parent edge (see `ChangedFiles::iter_actions`).
struct ActionsIterator<'a> {
    changes: &'a ChangedFiles<'a>,
    /// which parent edge (p1 or p2) copies should be reported for
    parent: Parent,
    /// index of the next entry to examine
    current: u32,
}
269 269
270 270 impl<'a> Iterator for ActionsIterator<'a> {
271 271 type Item = Action<'a>;
272 272
273 273 fn next(&mut self) -> Option<Action<'a>> {
274 274 let copy_flag = match self.parent {
275 275 Parent::FirstParent => P1_COPY,
276 276 Parent::SecondParent => P2_COPY,
277 277 };
278 278 while self.current < self.changes.nb_items {
279 279 let (flags, file, source) = self.changes.entry(self.current);
280 280 self.current += 1;
281 281 if (flags & ACTION_MASK) == REMOVED {
282 282 return Some(Action::Removed(file));
283 283 }
284 284 let copy = flags & COPY_MASK;
285 285 if copy == copy_flag {
286 286 return Some(Action::Copied(file, source));
287 287 }
288 288 }
289 289 return None;
290 290 }
291 291 }
292 292
/// A small struct whose purpose is to ensure lifetime of bytes referenced in
/// ChangedFiles
///
/// It is passed to the RevInfoMaker callback who can assign any necessary
/// content to the `data` attribute. The copy tracing code is responsible for
/// keeping the DataHolder alive at least as long as the ChangedFiles object.
pub struct DataHolder<D> {
    /// RevInfoMaker callback should assign data referenced by the
    /// ChangedFiles struct it return to this attribute. The DataHolder
    /// lifetime will be at least as long as the ChangedFiles one.
    pub data: Option<D>,
}

/// Callback turning a revision number into its `RevInfo`, parking any
/// backing bytes in the provided `DataHolder`.
pub type RevInfoMaker<'a, D> =
    Box<dyn for<'r> Fn(Revision, &'r mut DataHolder<D>) -> RevInfo<'r> + 'a>;

/// enum used to carry information about the parent → child currently processed
#[derive(Copy, Clone, Debug)]
enum Parent {
    /// The `p1(x) → x` edge
    FirstParent,
    /// The `p2(x) → x` edge
    SecondParent,
}
317 317
/// A small "tokenizer" responsible of turning full HgPath into lighter
/// PathToken
///
/// Dealing with small object, like integer is much faster, so HgPath input are
/// turned into integer "PathToken" and converted back in the end.
#[derive(Clone, Debug, Default)]
struct TwoWayPathMap {
    /// path → token lookup; tokens are dense indices into `path` below
    token: HashMap<HgPathBuf, PathToken>,
    /// token → path lookup, in token-allocation order
    path: Vec<HgPathBuf>,
}
328 328
329 329 impl TwoWayPathMap {
330 330 fn tokenize(&mut self, path: &HgPath) -> PathToken {
331 331 match self.token.get(path) {
332 332 Some(a) => *a,
333 333 None => {
334 334 let a = self.token.len();
335 335 let buf = path.to_owned();
336 336 self.path.push(buf.clone());
337 337 self.token.insert(buf, a);
338 338 a
339 339 }
340 340 }
341 341 }
342 342
343 343 fn untokenize(&self, token: PathToken) -> &HgPathBuf {
344 344 assert!(token < self.path.len(), format!("Unknown token: {}", token));
345 345 &self.path[token]
346 346 }
347 347 }
348 348
349 349 /// Same as mercurial.copies._combine_changeset_copies, but in Rust.
350 350 ///
351 351 /// Arguments are:
352 352 ///
353 353 /// revs: all revisions to be considered
354 354 /// children: a {parent ? [childrens]} mapping
355 355 /// target_rev: the final revision we are combining copies to
356 356 /// rev_info(rev): callback to get revision information:
357 357 /// * first parent
358 358 /// * second parent
359 359 /// * ChangedFiles
360 360 /// isancestors(low_rev, high_rev): callback to check if a revision is an
361 361 /// ancestor of another
362 362 pub fn combine_changeset_copies<A: Fn(Revision, Revision) -> bool, D>(
363 363 revs: Vec<Revision>,
364 364 mut children_count: HashMap<Revision, usize>,
365 365 target_rev: Revision,
366 366 rev_info: RevInfoMaker<D>,
367 367 is_ancestor: &A,
368 368 ) -> PathCopies {
369 369 let mut all_copies = HashMap::new();
370 370 let mut oracle = AncestorOracle::new(is_ancestor);
371 371
372 372 let mut path_map = TwoWayPathMap::default();
373 373
374 374 for rev in revs {
375 375 let mut d: DataHolder<D> = DataHolder { data: None };
376 376 let (p1, p2, changes) = rev_info(rev, &mut d);
377 377
378 378 // We will chain the copies information accumulated for the parent with
379 379 // the individual copies information the curent revision. Creating a
380 380 // new TimeStampedPath for each `rev` β†’ `children` vertex.
381 381 let mut copies: Option<TimeStampedPathCopies> = None;
382 382 if p1 != NULL_REVISION {
383 383 // Retrieve data computed in a previous iteration
384 384 let parent_copies = get_and_clean_parent_copies(
385 385 &mut all_copies,
386 386 &mut children_count,
387 387 p1,
388 388 );
389 389 if let Some(parent_copies) = parent_copies {
390 390 // combine it with data for that revision
391 391 let vertex_copies = add_from_changes(
392 392 &mut path_map,
393 393 &parent_copies,
394 394 &changes,
395 395 Parent::FirstParent,
396 396 rev,
397 397 );
398 398 // keep that data around for potential later combination
399 399 copies = Some(vertex_copies);
400 400 }
401 401 }
402 402 if p2 != NULL_REVISION {
403 403 // Retrieve data computed in a previous iteration
404 404 let parent_copies = get_and_clean_parent_copies(
405 405 &mut all_copies,
406 406 &mut children_count,
407 407 p2,
408 408 );
409 409 if let Some(parent_copies) = parent_copies {
410 410 // combine it with data for that revision
411 411 let vertex_copies = add_from_changes(
412 412 &mut path_map,
413 413 &parent_copies,
414 414 &changes,
415 415 Parent::SecondParent,
416 416 rev,
417 417 );
418 418
419 419 copies = match copies {
420 420 None => Some(vertex_copies),
421 421 // Merge has two parents needs to combines their copy
422 422 // information.
423 423 //
424 424 // If we got data from both parents, We need to combine
425 425 // them.
426 426 Some(copies) => Some(merge_copies_dict(
427 427 &path_map,
428 428 vertex_copies,
429 429 copies,
430 430 &changes,
431 431 &mut oracle,
432 432 )),
433 433 };
434 434 }
435 435 }
436 436 match copies {
437 437 Some(copies) => {
438 438 all_copies.insert(rev, copies);
439 439 }
440 440 _ => {}
441 441 }
442 442 }
443 443
444 444 // Drop internal information (like the timestamp) and return the final
445 445 // mapping.
446 446 let tt_result = all_copies
447 447 .remove(&target_rev)
448 448 .expect("target revision was not processed");
449 449 let mut result = PathCopies::default();
450 450 for (dest, tt_source) in tt_result {
451 451 if let Some(path) = tt_source.path {
452 452 let path_dest = path_map.untokenize(dest).to_owned();
453 453 let path_path = path_map.untokenize(path).to_owned();
454 454 result.insert(path_dest, path_path);
455 455 }
456 456 }
457 457 result
458 458 }
459 459
460 460 /// fetch previous computed information
461 461 ///
462 462 /// If no other children are expected to need this information, we drop it from
463 463 /// the cache.
464 464 ///
465 465 /// If parent is not part of the set we are expected to walk, return None.
466 466 fn get_and_clean_parent_copies(
467 467 all_copies: &mut HashMap<Revision, TimeStampedPathCopies>,
468 468 children_count: &mut HashMap<Revision, usize>,
469 469 parent_rev: Revision,
470 470 ) -> Option<TimeStampedPathCopies> {
471 471 let count = children_count.get_mut(&parent_rev)?;
472 472 *count -= 1;
473 473 if *count == 0 {
474 474 match all_copies.remove(&parent_rev) {
475 475 Some(c) => Some(c),
476 476 None => Some(TimeStampedPathCopies::default()),
477 477 }
478 478 } else {
479 479 match all_copies.get(&parent_rev) {
480 480 Some(c) => Some(c.clone()),
481 481 None => Some(TimeStampedPathCopies::default()),
482 482 }
483 483 }
484 484 }
485 485
486 486 /// Combine ChangedFiles with some existing PathCopies information and return
487 487 /// the result
488 488 fn add_from_changes(
489 489 path_map: &mut TwoWayPathMap,
490 490 base_copies: &TimeStampedPathCopies,
491 491 changes: &ChangedFiles,
492 492 parent: Parent,
493 493 current_rev: Revision,
494 494 ) -> TimeStampedPathCopies {
495 495 let mut copies = base_copies.clone();
496 496 for action in changes.iter_actions(parent) {
497 497 match action {
498 498 Action::Copied(path_dest, path_source) => {
499 499 let dest = path_map.tokenize(path_dest);
500 500 let source = path_map.tokenize(path_source);
501 501 let entry;
502 502 if let Some(v) = base_copies.get(&source) {
503 503 entry = match &v.path {
504 504 Some(path) => Some((*(path)).to_owned()),
505 505 None => Some(source.to_owned()),
506 506 }
507 507 } else {
508 508 entry = Some(source.to_owned());
509 509 }
510 510 // Each new entry is introduced by the children, we
511 511 // record this information as we will need it to take
512 512 // the right decision when merging conflicting copy
513 513 // information. See merge_copies_dict for details.
514 514 match copies.entry(dest) {
515 515 Entry::Vacant(slot) => {
516 516 let ttpc = TimeStampedPathCopy {
517 517 rev: current_rev,
518 518 path: entry,
519 519 };
520 520 slot.insert(ttpc);
521 521 }
522 522 Entry::Occupied(mut slot) => {
523 523 let mut ttpc = slot.get_mut();
524 524 ttpc.rev = current_rev;
525 525 ttpc.path = entry;
526 526 }
527 527 }
528 528 }
529 529 Action::Removed(deleted_path) => {
530 530 // We must drop copy information for removed file.
531 531 //
532 532 // We need to explicitly record them as dropped to
533 533 // propagate this information when merging two
534 534 // TimeStampedPathCopies object.
535 535 let deleted = path_map.tokenize(deleted_path);
536 536 copies.entry(deleted).and_modify(|old| {
537 537 old.rev = current_rev;
538 538 old.path = None;
539 539 });
540 540 }
541 541 }
542 542 }
543 543 copies
544 544 }
545 545
/// merge two copies-mapping together, minor and major
///
/// In case of conflict, value from "major" will be picked, unless in some
/// cases. See inline documentation for details.
fn merge_copies_dict<A: Fn(Revision, Revision) -> bool>(
    path_map: &TwoWayPathMap,
    mut minor: TimeStampedPathCopies,
    mut major: TimeStampedPathCopies,
    changes: &ChangedFiles,
    oracle: &mut AncestorOracle<A>,
) -> TimeStampedPathCopies {
    // This closure exist as temporary help while multiple developers are
    // actively working on this code. Feel free to re-inline it once this
    // code is more settled.
    let mut cmp_value =
        |dest: &PathToken,
         src_minor: &TimeStampedPathCopy,
         src_major: &TimeStampedPathCopy| {
            compare_value(
                path_map, changes, oracle, dest, src_minor, src_major,
            )
        };
    if minor.is_empty() {
        major
    } else if major.is_empty() {
        minor
    } else if minor.len() * 2 < major.len() {
        // Lets say we are merging two TimeStampedPathCopies instance A and B.
        //
        // If A contains N items, the merge result will never contains more
        // than N values different than the ones in A
        //
        // If B contains M items, with M > N, the merge result will always
        // result in a minimum of M - N values different than the ones in
        // A
        //
        // As a result, if N < (M-N), we know that simply iterating over A will
        // yield less difference than iterating over the difference
        // between A and B.
        //
        // This helps performance a lot in cases where a tiny
        // TimeStampedPathCopies is merged with a much larger one.
        for (dest, src_minor) in minor {
            let src_major = major.get(&dest);
            match src_major {
                None => major.insert(dest, src_minor),
                Some(src_major) => {
                    match cmp_value(&dest, &src_minor, src_major) {
                        MergePick::Any | MergePick::Major => None,
                        MergePick::Minor => major.insert(dest, src_minor),
                    }
                }
            };
        }
        major
    } else if major.len() * 2 < minor.len() {
        // This uses the same rationale as the previous block.
        // (Check previous block documentation for details.)
        for (dest, src_major) in major {
            let src_minor = minor.get(&dest);
            match src_minor {
                None => minor.insert(dest, src_major),
                Some(src_minor) => {
                    match cmp_value(&dest, src_minor, &src_major) {
                        MergePick::Any | MergePick::Minor => None,
                        MergePick::Major => minor.insert(dest, src_major),
                    }
                }
            };
        }
        minor
    } else {
        // Both maps are of comparable size: diff them and only process
        // entries where they actually disagree.
        let mut override_minor = Vec::new();
        let mut override_major = Vec::new();

        let mut to_major = |k: &PathToken, v: &TimeStampedPathCopy| {
            override_major.push((k.clone(), v.clone()))
        };
        let mut to_minor = |k: &PathToken, v: &TimeStampedPathCopy| {
            override_minor.push((k.clone(), v.clone()))
        };

        // The diff function leverages detection of the identical subparts if
        // minor and major have some common ancestors. This makes it very
        // fast in most cases.
        //
        // In case where the two maps are vastly different in size, the current
        // approach is still slowish because the iteration will iterate over
        // all the "exclusive" content of the larger one. This situation can be
        // frequent when the subgraph of revisions we are processing has a lot
        // of roots. Each root adds its own fully new map to the mix (and
        // likely a small map, if the path from the root to the "main path" is
        // small).
        //
        // We could do better by detecting such situations and processing them
        // differently.
        for d in minor.diff(&major) {
            match d {
                // `diff` is oriented minor → major: an Add is an entry only
                // present in `major` (so minor must learn it), a Remove is
                // only present in `minor` (so major must learn it).
                DiffItem::Add(k, v) => to_minor(k, v),
                DiffItem::Remove(k, v) => to_major(k, v),
                DiffItem::Update { old, new } => {
                    let (dest, src_major) = new;
                    let (_, src_minor) = old;
                    match cmp_value(dest, src_minor, src_major) {
                        MergePick::Major => to_minor(dest, src_major),
                        MergePick::Minor => to_major(dest, src_minor),
                        // If the two entries are identical, no need to do
                        // anything (but diff should not have yielded them)
                        MergePick::Any => unreachable!(),
                    }
                }
            };
        }

        // Apply the smaller set of overrides onto the corresponding base
        // map, so we pay for the fewest OrdMap insertions possible.
        let updates;
        let mut result;
        if override_major.is_empty() {
            result = major
        } else if override_minor.is_empty() {
            result = minor
        } else {
            if override_minor.len() < override_major.len() {
                updates = override_minor;
                result = minor;
            } else {
                updates = override_major;
                result = major;
            }
            for (k, v) in updates {
                result.insert(k, v);
            }
        }
        result
    }
}
681 681
/// represent the side that should prevail when merging two
/// TimeStampedPathCopies (as decided by `compare_value`)
enum MergePick {
    /// The "major" (p1) side prevails
    Major,
    /// The "minor" (p2) side prevails
    Minor,
    /// Any side could be used (because they are the same)
    Any,
}
692 692
/// decide which side prevails in case of conflicting values
#[allow(clippy::if_same_then_else)]
fn compare_value<A: Fn(Revision, Revision) -> bool>(
    path_map: &TwoWayPathMap,
    changes: &ChangedFiles,
    oracle: &mut AncestorOracle<A>,
    dest: &PathToken,
    src_minor: &TimeStampedPathCopy,
    src_major: &TimeStampedPathCopy,
) -> MergePick {
    if src_major.path == src_minor.path {
        // we have the same value, but from other source;
        if src_major.rev == src_minor.rev {
            // If the two entries are identical, they are both valid
            MergePick::Any
        } else if oracle.is_overwrite(src_major.rev, src_minor.rev) {
            // the minor side carries the more recent information
            MergePick::Minor
        } else {
            MergePick::Major
        }
    } else if src_major.rev == src_minor.rev {
        // We cannot get copy information for both p1 and p2 in the
        // same rev. So this is the same value.
        unreachable!(
            "conflict information from p1 and p2 in the same revision"
        );
    } else {
        let dest_path = path_map.untokenize(*dest);
        let action = changes.get_merge_case(dest_path);
        if src_major.path.is_none() && action == MergeCase::Salvaged {
            // If the file is "deleted" in the major side but was
            // salvaged by the merge, we keep the minor side alive
            MergePick::Minor
        } else if src_minor.path.is_none() && action == MergeCase::Salvaged {
            // If the file is "deleted" in the minor side but was
            // salvaged by the merge, unconditionally preserve the
            // major side.
            MergePick::Major
        } else if action == MergeCase::Merged {
            // If the file was actively merged, copy information
            // from each side might conflict. The major side will
            // win such conflict.
            MergePick::Major
        } else if oracle.is_overwrite(src_major.rev, src_minor.rev) {
            // If the minor side is strictly newer than the major
            // side, it should be kept.
            MergePick::Minor
        } else if src_major.path.is_some() {
            // without any special case, the "major" value win
            // over the "minor" one.
            MergePick::Major
        } else if oracle.is_overwrite(src_minor.rev, src_major.rev) {
            // the "major" rev is a direct ancestors of "minor",
            // any different value should
            // overwrite
            MergePick::Major
        } else {
            // major version is None (so the file was deleted on
            // that branch) and that branch is independent (neither
            // minor nor major is an ancestors of the other one.)
            // We preserve the new
            // information about the new file.
            MergePick::Minor
        }
    }
}
General Comments 0
You need to be logged in to leave comments. Login now