##// END OF EJS Templates
copies-rust: start recording overwrites as they happen...
marmoute -
r46770:fce2f20a default
parent child Browse files
Show More
@@ -1,758 +1,767 b''
1 1 use crate::utils::hg_path::HgPath;
2 2 use crate::utils::hg_path::HgPathBuf;
3 3 use crate::Revision;
4 4 use crate::NULL_REVISION;
5 5
6 6 use im_rc::ordmap::DiffItem;
7 7 use im_rc::ordmap::Entry;
8 8 use im_rc::ordmap::OrdMap;
9 9
10 10 use std::cmp::Ordering;
11 11 use std::collections::HashMap;
12 12 use std::convert::TryInto;
13 13
/// Final result type: maps a copy destination to its copy source, using
/// full paths.
pub type PathCopies = HashMap<HgPathBuf, HgPathBuf>;

/// Lightweight integer standing in for a full `HgPath` (see `TwoWayPathMap`).
type PathToken = usize;

#[derive(Clone, Debug, PartialEq, Copy)]
struct TimeStampedPathCopy {
    /// revision at which the copy information was added
    rev: Revision,
    /// the copy source, (Set to None in case of deletion of the associated
    /// key)
    path: Option<PathToken>,
}

/// maps CopyDestination to Copy Source (+ a "timestamp" for the operation)
type TimeStampedPathCopies = OrdMap<PathToken, TimeStampedPathCopy>;

/// hold parent 1, parent 2 and relevant files actions.
pub type RevInfo<'a> = (Revision, Revision, ChangedFiles<'a>);
32 32
/// represent the files affected by a changesets
///
/// This hold a subset of mercurial.metadata.ChangingFiles as we do not need
/// all the data categories tracked by it.
pub struct ChangedFiles<'a> {
    /// number of entries recorded in `index`
    nb_items: u32,
    /// fixed-size index entries (1 flag byte + two big-endian u32 offsets
    /// per entry, see `ChangedFiles::ENTRY_SIZE`)
    index: &'a [u8],
    /// concatenated filename bytes, addressed through offsets in `index`
    data: &'a [u8],
}
44 44
/// Represent active changes that affect the copy tracing.
enum Action<'a> {
    /// The parent → children edge is removing a file
    ///
    /// (actually, this could be the edge from the other parent, but it does
    /// not matter)
    Removed(&'a HgPath),
    /// The parent → children edge introduces copy information between (dest,
    /// source)
    Copied(&'a HgPath, &'a HgPath),
}
56 56
/// This expresses the possible "special" case we can get in a merge
///
/// See mercurial/metadata.py for details on these values.
#[derive(PartialEq)]
enum MergeCase {
    /// Merged: file had history on both side that needed to be merged
    Merged,
    /// Salvaged: file was candidate for deletion, but survived the merge
    Salvaged,
    /// Normal: Not one of the two cases above
    Normal,
}
69 69
/// One decoded index entry: (flags byte, filename, copy source).
type FileChange<'a> = (u8, &'a HgPath, &'a HgPath);

const EMPTY: &[u8] = b"";
// `flags & COPY_MASK` selects which parent the copy information comes from.
const COPY_MASK: u8 = 3;
const P1_COPY: u8 = 2;
const P2_COPY: u8 = 3;
// `flags & ACTION_MASK` selects the action recorded for the file.
const ACTION_MASK: u8 = 28;
const REMOVED: u8 = 12;
const MERGED: u8 = 8;
const SALVAGED: u8 = 16;
80 80
impl<'a> ChangedFiles<'a> {
    // On-disk layout: a 4-byte big-endian item count, then `nb_items`
    // fixed-size index entries, then the concatenated filename data.
    const INDEX_START: usize = 4;
    const ENTRY_SIZE: u32 = 9;
    const FILENAME_START: u32 = 1;
    const COPY_SOURCE_START: u32 = 5;

    /// Parse the serialized changed-files data, validating that `data` is
    /// large enough for the header, the index, and the filename section.
    ///
    /// # Panics
    /// Panics if `data` is truncated with respect to the sizes its own
    /// header declares.
    ///
    /// NOTE(review): `nb_items - 1` below underflows if the header declares
    /// zero items; callers appear to use `new_empty()` for the empty case —
    /// confirm against the writer in mercurial/metadata.py.
    pub fn new(data: &'a [u8]) -> Self {
        assert!(
            data.len() >= 4,
            "data size ({}) is too small to contain the header (4)",
            data.len()
        );
        let nb_items_raw: [u8; 4] = (&data[0..=3])
            .try_into()
            .expect("failed to turn 4 bytes into 4 bytes");
        let nb_items = u32::from_be_bytes(nb_items_raw);

        let index_size = (nb_items * Self::ENTRY_SIZE) as usize;
        let index_end = Self::INDEX_START + index_size;

        assert!(
            data.len() >= index_end,
            "data size ({}) is too small to fit the index_data ({})",
            data.len(),
            index_end
        );

        let ret = ChangedFiles {
            nb_items,
            index: &data[Self::INDEX_START..index_end],
            data: &data[index_end..],
        };
        // The last entry's filename-end offset bounds the whole data section.
        let max_data = ret.filename_end(nb_items - 1) as usize;
        assert!(
            ret.data.len() >= max_data,
            "data size ({}) is too small to fit all data ({})",
            data.len(),
            index_end + max_data
        );
        ret
    }

    /// Build a `ChangedFiles` recording no file change at all.
    pub fn new_empty() -> Self {
        ChangedFiles {
            nb_items: 0,
            index: EMPTY,
            data: EMPTY,
        }
    }

    /// internal function to return an individual entry at a given index
    fn entry(&'a self, idx: u32) -> FileChange<'a> {
        if idx >= self.nb_items {
            panic!(
                "index for entry is higher that the number of file {} >= {}",
                idx, self.nb_items
            )
        }
        let flags = self.flags(idx);
        let filename = self.filename(idx);
        let copy_idx = self.copy_idx(idx);
        let copy_source = self.filename(copy_idx);
        (flags, filename, copy_source)
    }

    /// internal function to return the filename of the entry at a given index
    fn filename(&self, idx: u32) -> &HgPath {
        // Filenames are stored back to back: entry `idx` starts where entry
        // `idx - 1` ends (or at 0 for the first entry).
        let filename_start;
        if idx == 0 {
            filename_start = 0;
        } else {
            filename_start = self.filename_end(idx - 1)
        }
        let filename_end = self.filename_end(idx);
        let filename_start = filename_start as usize;
        let filename_end = filename_end as usize;
        HgPath::new(&self.data[filename_start..filename_end])
    }

    /// internal function to return the flag field of the entry at a given
    /// index
    fn flags(&self, idx: u32) -> u8 {
        let idx = idx as usize;
        self.index[idx * (Self::ENTRY_SIZE as usize)]
    }

    /// internal function to return the end of a filename part at a given index
    fn filename_end(&self, idx: u32) -> u32 {
        let start = (idx * Self::ENTRY_SIZE) + Self::FILENAME_START;
        let end = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
        let start = start as usize;
        let end = end as usize;
        let raw = (&self.index[start..end])
            .try_into()
            .expect("failed to turn 4 bytes into 4 bytes");
        u32::from_be_bytes(raw)
    }

    /// internal function to return index of the copy source of the entry at a
    /// given index
    fn copy_idx(&self, idx: u32) -> u32 {
        let start = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
        let end = (idx + 1) * Self::ENTRY_SIZE;
        let start = start as usize;
        let end = end as usize;
        let raw = (&self.index[start..end])
            .try_into()
            .expect("failed to turn 4 bytes into 4 bytes");
        u32::from_be_bytes(raw)
    }

    /// Return an iterator over all the `Action` in this instance.
    fn iter_actions(&self, parent: Parent) -> ActionsIterator {
        ActionsIterator {
            changes: &self,
            parent: parent,
            current: 0,
        }
    }

    /// return the MergeCase value associated with a filename
    ///
    /// Binary search over the index entries.
    /// NOTE(review): the probe moves *up* on `Ordering::Less`, which assumes
    /// the entries are sorted in the matching order — confirm against the
    /// writer in mercurial/metadata.py.
    fn get_merge_case(&self, path: &HgPath) -> MergeCase {
        if self.nb_items == 0 {
            return MergeCase::Normal;
        }
        let mut low_part = 0;
        let mut high_part = self.nb_items;

        while low_part < high_part {
            let cursor = (low_part + high_part - 1) / 2;
            let (flags, filename, _source) = self.entry(cursor);
            match path.cmp(filename) {
                Ordering::Less => low_part = cursor + 1,
                Ordering::Greater => high_part = cursor,
                Ordering::Equal => {
                    return match flags & ACTION_MASK {
                        MERGED => MergeCase::Merged,
                        SALVAGED => MergeCase::Salvaged,
                        _ => MergeCase::Normal,
                    };
                }
            }
        }
        MergeCase::Normal
    }
}
227 227
/// A struct responsible for answering "is X ancestors of Y" quickly
///
/// The structure will delegate ancestors call to a callback, and cache the
/// result.
#[derive(Debug)]
struct AncestorOracle<'a, A: Fn(Revision, Revision) -> bool> {
    /// callback answering "is `anc` an ancestor of `desc`?"
    inner: &'a A,
    /// cache of previously computed or recorded (ancestor, descendant) pairs
    pairs: HashMap<(Revision, Revision), bool>,
}
237 237
238 238 impl<'a, A: Fn(Revision, Revision) -> bool> AncestorOracle<'a, A> {
239 239 fn new(func: &'a A) -> Self {
240 240 Self {
241 241 inner: func,
242 242 pairs: HashMap::default(),
243 243 }
244 244 }
245 245
246 fn record_overwrite(&mut self, anc: Revision, desc: Revision) {
247 self.pairs.insert((anc, desc), true);
248 }
249
246 250 /// returns `true` if `anc` is an ancestors of `desc`, `false` otherwise
247 251 fn is_overwrite(&mut self, anc: Revision, desc: Revision) -> bool {
248 252 if anc > desc {
249 253 false
250 254 } else if anc == desc {
251 255 true
252 256 } else {
253 257 if let Some(b) = self.pairs.get(&(anc, desc)) {
254 258 *b
255 259 } else {
256 260 let b = (self.inner)(anc, desc);
257 261 self.pairs.insert((anc, desc), b);
258 262 b
259 263 }
260 264 }
261 265 }
262 266 }
263 267
/// Iterator state for walking the `Action`s of a `ChangedFiles` relevant to
/// one parent edge (see `ChangedFiles::iter_actions`).
struct ActionsIterator<'a> {
    changes: &'a ChangedFiles<'a>,
    /// which parent edge we are walking (selects which copy flag matters)
    parent: Parent,
    /// index of the next entry to examine
    current: u32,
}
269 273
270 274 impl<'a> Iterator for ActionsIterator<'a> {
271 275 type Item = Action<'a>;
272 276
273 277 fn next(&mut self) -> Option<Action<'a>> {
274 278 let copy_flag = match self.parent {
275 279 Parent::FirstParent => P1_COPY,
276 280 Parent::SecondParent => P2_COPY,
277 281 };
278 282 while self.current < self.changes.nb_items {
279 283 let (flags, file, source) = self.changes.entry(self.current);
280 284 self.current += 1;
281 285 if (flags & ACTION_MASK) == REMOVED {
282 286 return Some(Action::Removed(file));
283 287 }
284 288 let copy = flags & COPY_MASK;
285 289 if copy == copy_flag {
286 290 return Some(Action::Copied(file, source));
287 291 }
288 292 }
289 293 return None;
290 294 }
291 295 }
292 296
/// A small struct whose purpose is to ensure lifetime of bytes referenced in
/// ChangedFiles
///
/// It is passed to the RevInfoMaker callback who can assign any necessary
/// content to the `data` attribute. The copy tracing code is responsible for
/// keeping the DataHolder alive at least as long as the ChangedFiles object.
pub struct DataHolder<D> {
    /// RevInfoMaker callback should assign data referenced by the
    /// ChangedFiles struct it return to this attribute. The DataHolder
    /// lifetime will be at least as long as the ChangedFiles one.
    pub data: Option<D>,
}

/// Callback producing the `RevInfo` for a revision; borrowed data must be
/// parked in the supplied `DataHolder` so it outlives the returned value.
pub type RevInfoMaker<'a, D> =
    Box<dyn for<'r> Fn(Revision, &'r mut DataHolder<D>) -> RevInfo<'r> + 'a>;
308 312
/// enum used to carry information about the parent → child currently processed
#[derive(Copy, Clone, Debug)]
enum Parent {
    /// The `p1(x) → x` edge
    FirstParent,
    /// The `p2(x) → x` edge
    SecondParent,
}
317 321
/// A small "tokenizer" responsible of turning full HgPath into lighter
/// PathToken
///
/// Dealing with small object, like integer is much faster, so HgPath input are
/// turned into integer "PathToken" and converted back in the end.
#[derive(Clone, Debug, Default)]
struct TwoWayPathMap {
    /// path → token lookup; tokens are issued sequentially from 0
    token: HashMap<HgPathBuf, PathToken>,
    /// token → path lookup (the token is an index into this vector)
    path: Vec<HgPathBuf>,
}
328 332
329 333 impl TwoWayPathMap {
330 334 fn tokenize(&mut self, path: &HgPath) -> PathToken {
331 335 match self.token.get(path) {
332 336 Some(a) => *a,
333 337 None => {
334 338 let a = self.token.len();
335 339 let buf = path.to_owned();
336 340 self.path.push(buf.clone());
337 341 self.token.insert(buf, a);
338 342 a
339 343 }
340 344 }
341 345 }
342 346
343 347 fn untokenize(&self, token: PathToken) -> &HgPathBuf {
344 348 assert!(token < self.path.len(), format!("Unknown token: {}", token));
345 349 &self.path[token]
346 350 }
347 351 }
348 352
/// Same as mercurial.copies._combine_changeset_copies, but in Rust.
///
/// Arguments are:
///
/// revs: all revisions to be considered
/// children: a {parent → [children]} mapping
/// target_rev: the final revision we are combining copies to
/// rev_info(rev): callback to get revision information:
///   * first parent
///   * second parent
///   * ChangedFiles
/// isancestors(low_rev, high_rev): callback to check if a revision is an
///   ancestor of another
pub fn combine_changeset_copies<A: Fn(Revision, Revision) -> bool, D>(
    revs: Vec<Revision>,
    mut children_count: HashMap<Revision, usize>,
    target_rev: Revision,
    rev_info: RevInfoMaker<D>,
    is_ancestor: &A,
) -> PathCopies {
    // per-revision accumulated copy information, keyed by revision
    let mut all_copies = HashMap::new();
    let mut oracle = AncestorOracle::new(is_ancestor);

    let mut path_map = TwoWayPathMap::default();

    // NOTE: `revs` is expected to be topologically ordered (parents before
    // children) so that parent data is available when a child is processed
    // — TODO confirm with callers.
    for rev in revs {
        let mut d: DataHolder<D> = DataHolder { data: None };
        let (p1, p2, changes) = rev_info(rev, &mut d);

        // We will chain the copies information accumulated for the parent with
        // the individual copies information the current revision. Creating a
        // new TimeStampedPath for each `rev` → `children` vertex.
        let mut copies: Option<TimeStampedPathCopies> = None;
        if p1 != NULL_REVISION {
            // Retrieve data computed in a previous iteration
            let parent_copies = get_and_clean_parent_copies(
                &mut all_copies,
                &mut children_count,
                p1,
            );
            if let Some(parent_copies) = parent_copies {
                // combine it with data for that revision
                let vertex_copies = add_from_changes(
                    &mut path_map,
                    &mut oracle,
                    &parent_copies,
                    &changes,
                    Parent::FirstParent,
                    rev,
                );
                // keep that data around for potential later combination
                copies = Some(vertex_copies);
            }
        }
        if p2 != NULL_REVISION {
            // Retrieve data computed in a previous iteration
            let parent_copies = get_and_clean_parent_copies(
                &mut all_copies,
                &mut children_count,
                p2,
            );
            if let Some(parent_copies) = parent_copies {
                // combine it with data for that revision
                let vertex_copies = add_from_changes(
                    &mut path_map,
                    &mut oracle,
                    &parent_copies,
                    &changes,
                    Parent::SecondParent,
                    rev,
                );

                copies = match copies {
                    None => Some(vertex_copies),
                    // Merge has two parents needs to combines their copy
                    // information.
                    //
                    // If we got data from both parents, We need to combine
                    // them.
                    Some(copies) => Some(merge_copies_dict(
                        &path_map,
                        vertex_copies,
                        copies,
                        &changes,
                        &mut oracle,
                    )),
                };
            }
        }
        match copies {
            Some(copies) => {
                all_copies.insert(rev, copies);
            }
            _ => {}
        }
    }

    // Drop internal information (like the timestamp) and return the final
    // mapping.
    let tt_result = all_copies
        .remove(&target_rev)
        .expect("target revision was not processed");
    let mut result = PathCopies::default();
    for (dest, tt_source) in tt_result {
        // entries with `path == None` record deletions and are dropped here
        if let Some(path) = tt_source.path {
            let path_dest = path_map.untokenize(dest).to_owned();
            let path_path = path_map.untokenize(path).to_owned();
            result.insert(path_dest, path_path);
        }
    }
    result
}
459 465
460 466 /// fetch previous computed information
461 467 ///
462 468 /// If no other children are expected to need this information, we drop it from
463 469 /// the cache.
464 470 ///
465 471 /// If parent is not part of the set we are expected to walk, return None.
466 472 fn get_and_clean_parent_copies(
467 473 all_copies: &mut HashMap<Revision, TimeStampedPathCopies>,
468 474 children_count: &mut HashMap<Revision, usize>,
469 475 parent_rev: Revision,
470 476 ) -> Option<TimeStampedPathCopies> {
471 477 let count = children_count.get_mut(&parent_rev)?;
472 478 *count -= 1;
473 479 if *count == 0 {
474 480 match all_copies.remove(&parent_rev) {
475 481 Some(c) => Some(c),
476 482 None => Some(TimeStampedPathCopies::default()),
477 483 }
478 484 } else {
479 485 match all_copies.get(&parent_rev) {
480 486 Some(c) => Some(c.clone()),
481 487 None => Some(TimeStampedPathCopies::default()),
482 488 }
483 489 }
484 490 }
485 491
/// Combine ChangedFiles with some existing PathCopies information and return
/// the result
///
/// Starts from `base_copies` (the parent's data) and applies, in order, every
/// copy and removal recorded in `changes` for the given `parent` edge,
/// stamping each modification with `current_rev` and informing `oracle` of
/// every overwrite so later merges can resolve conflicts.
fn add_from_changes<A: Fn(Revision, Revision) -> bool>(
    path_map: &mut TwoWayPathMap,
    oracle: &mut AncestorOracle<A>,
    base_copies: &TimeStampedPathCopies,
    changes: &ChangedFiles,
    parent: Parent,
    current_rev: Revision,
) -> TimeStampedPathCopies {
    let mut copies = base_copies.clone();
    for action in changes.iter_actions(parent) {
        match action {
            Action::Copied(path_dest, path_source) => {
                let dest = path_map.tokenize(path_dest);
                let source = path_map.tokenize(path_source);
                // Follow chained copies: if the source was itself copied
                // from somewhere, keep pointing at the original file.
                let entry;
                if let Some(v) = base_copies.get(&source) {
                    entry = match &v.path {
                        Some(path) => Some((*(path)).to_owned()),
                        // The source was deleted then recreated: the chain
                        // is broken, start fresh from `source`.
                        None => Some(source.to_owned()),
                    }
                } else {
                    entry = Some(source.to_owned());
                }
                // Each new entry is introduced by the children, we
                // record this information as we will need it to take
                // the right decision when merging conflicting copy
                // information. See merge_copies_dict for details.
                match copies.entry(dest) {
                    Entry::Vacant(slot) => {
                        let ttpc = TimeStampedPathCopy {
                            rev: current_rev,
                            path: entry,
                        };
                        slot.insert(ttpc);
                    }
                    Entry::Occupied(mut slot) => {
                        // A pre-existing entry for this destination is
                        // being overwritten by the child revision: tell the
                        // oracle so the overwrite is known without an
                        // is_ancestor call later.
                        let mut ttpc = slot.get_mut();
                        oracle.record_overwrite(ttpc.rev, current_rev);
                        ttpc.rev = current_rev;
                        ttpc.path = entry;
                    }
                }
            }
            Action::Removed(deleted_path) => {
                // We must drop copy information for removed file.
                //
                // We need to explicitly record them as dropped to
                // propagate this information when merging two
                // TimeStampedPathCopies object.
                let deleted = path_map.tokenize(deleted_path);
                copies.entry(deleted).and_modify(|old| {
                    oracle.record_overwrite(old.rev, current_rev);
                    old.rev = current_rev;
                    old.path = None;
                });
            }
        }
    }
    copies
}
545 554
/// merge two copies-mapping together, minor and major
///
/// In case of conflict, value from "major" will be picked, unless in some
/// cases. See inline documentation for details.
fn merge_copies_dict<A: Fn(Revision, Revision) -> bool>(
    path_map: &TwoWayPathMap,
    mut minor: TimeStampedPathCopies,
    mut major: TimeStampedPathCopies,
    changes: &ChangedFiles,
    oracle: &mut AncestorOracle<A>,
) -> TimeStampedPathCopies {
    // This closure exists as temporary help while multiple developers are
    // actively working on this code. Feel free to re-inline it once this
    // code is more settled.
    let mut cmp_value =
        |dest: &PathToken,
         src_minor: &TimeStampedPathCopy,
         src_major: &TimeStampedPathCopy| {
            compare_value(
                path_map, changes, oracle, dest, src_minor, src_major,
            )
        };
    if minor.is_empty() {
        major
    } else if major.is_empty() {
        minor
    } else if minor.len() * 2 < major.len() {
        // Lets says we are merging two TimeStampedPathCopies instance A and B.
        //
        // If A contains N items, the merge result will never contains more
        // than N values differents than the one in A
        //
        // If B contains M items, with M > N, the merge result will always
        // result in a minimum of M - N value differents than the on in
        // A
        //
        // As a result, if N < (M-N), we know that simply iterating over A will
        // yield less difference than iterating over the difference
        // between A and B.
        //
        // This help performance a lot in case were a tiny
        // TimeStampedPathCopies is merged with a much larger one.
        for (dest, src_minor) in minor {
            let src_major = major.get(&dest);
            match src_major {
                // `dest` only exists on the minor side: keep it as-is.
                None => major.insert(dest, src_minor),
                Some(src_major) => {
                    match cmp_value(&dest, &src_minor, src_major) {
                        MergePick::Any | MergePick::Major => None,
                        MergePick::Minor => major.insert(dest, src_minor),
                    }
                }
            };
        }
        major
    } else if major.len() * 2 < minor.len() {
        // This uses the same rationale than the previous block.
        // (Check previous block documentation for details.)
        for (dest, src_major) in major {
            let src_minor = minor.get(&dest);
            match src_minor {
                None => minor.insert(dest, src_major),
                Some(src_minor) => {
                    match cmp_value(&dest, src_minor, &src_major) {
                        MergePick::Any | MergePick::Minor => None,
                        MergePick::Major => minor.insert(dest, src_major),
                    }
                }
            };
        }
        minor
    } else {
        // The two maps are of comparable size: diff them and patch the side
        // that needs fewer updates.
        let mut override_minor = Vec::new();
        let mut override_major = Vec::new();

        let mut to_major = |k: &PathToken, v: &TimeStampedPathCopy| {
            override_major.push((k.clone(), v.clone()))
        };
        let mut to_minor = |k: &PathToken, v: &TimeStampedPathCopy| {
            override_minor.push((k.clone(), v.clone()))
        };

        // The diff function leverage detection of the identical subpart if
        // minor and major has some common ancestors. This make it very
        // fast is most case.
        //
        // In case where the two map are vastly different in size, the current
        // approach is still slowish because the iteration will iterate over
        // all the "exclusive" content of the larger on. This situation can be
        // frequent when the subgraph of revision we are processing has a lot
        // of roots. Each roots adding they own fully new map to the mix (and
        // likely a small map, if the path from the root to the "main path" is
        // small.
        //
        // We could do better by detecting such situation and processing them
        // differently.
        for d in minor.diff(&major) {
            match d {
                DiffItem::Add(k, v) => to_minor(k, v),
                DiffItem::Remove(k, v) => to_major(k, v),
                DiffItem::Update { old, new } => {
                    let (dest, src_major) = new;
                    let (_, src_minor) = old;
                    match cmp_value(dest, src_minor, src_major) {
                        MergePick::Major => to_minor(dest, src_major),
                        MergePick::Minor => to_major(dest, src_minor),
                        // If the two entry are identical, no need to do
                        // anything (but diff should not have yield them)
                        MergePick::Any => unreachable!(),
                    }
                }
            };
        }

        // Apply the smaller override list to the corresponding base map.
        let updates;
        let mut result;
        if override_major.is_empty() {
            result = major
        } else if override_minor.is_empty() {
            result = minor
        } else {
            if override_minor.len() < override_major.len() {
                updates = override_minor;
                result = minor;
            } else {
                updates = override_major;
                result = major;
            }
            for (k, v) in updates {
                result.insert(k, v);
            }
        }
        result
    }
}
681 690
/// represent the side that should prevail when merging two
/// TimeStampedPathCopies
enum MergePick {
    /// The "major" (p1) side prevails
    Major,
    /// The "minor" (p2) side prevails
    Minor,
    /// Any side could be used (because they are the same)
    Any,
}
692 701
/// decide which side prevails in case of conflicting values
///
/// The decision uses, in order: equality of the recorded values, the merge
/// case recorded for the destination file, and the overwrite/ancestry
/// relation between the two recording revisions (via `oracle`).
#[allow(clippy::if_same_then_else)]
fn compare_value<A: Fn(Revision, Revision) -> bool>(
    path_map: &TwoWayPathMap,
    changes: &ChangedFiles,
    oracle: &mut AncestorOracle<A>,
    dest: &PathToken,
    src_minor: &TimeStampedPathCopy,
    src_major: &TimeStampedPathCopy,
) -> MergePick {
    if src_major.path == src_minor.path {
        // we have the same value, but from other source;
        if src_major.rev == src_minor.rev {
            // If the two entry are identical, they are both valid
            MergePick::Any
        } else if oracle.is_overwrite(src_major.rev, src_minor.rev) {
            MergePick::Minor
        } else {
            MergePick::Major
        }
    } else if src_major.rev == src_minor.rev {
        // We cannot get copy information for both p1 and p2 in the
        // same rev. So this is the same value.
        unreachable!(
            "conflict information from p1 and p2 in the same revision"
        );
    } else {
        let dest_path = path_map.untokenize(*dest);
        let action = changes.get_merge_case(dest_path);
        if src_major.path.is_none() && action == MergeCase::Salvaged {
            // If the file is "deleted" in the major side but was
            // salvaged by the merge, we keep the minor side alive
            MergePick::Minor
        } else if src_minor.path.is_none() && action == MergeCase::Salvaged {
            // If the file is "deleted" in the minor side but was
            // salvaged by the merge, unconditionally preserve the
            // major side.
            MergePick::Major
        } else if action == MergeCase::Merged {
            // If the file was actively merged, copy information
            // from each side might conflict. The major side will
            // win such conflict.
            MergePick::Major
        } else if oracle.is_overwrite(src_major.rev, src_minor.rev) {
            // If the minor side is strictly newer than the major
            // side, it should be kept.
            MergePick::Minor
        } else if src_major.path.is_some() {
            // without any special case, the "major" value win
            // over the "minor" one.
            MergePick::Major
        } else if oracle.is_overwrite(src_minor.rev, src_major.rev) {
            // the "major" rev is a direct ancestors of "minor",
            // any different value should
            // overwrite
            MergePick::Major
        } else {
            // major version is None (so the file was deleted on
            // that branch) and that branch is independent (neither
            // minor nor major is an ancestors of the other one.)
            // We preserve the new
            // information about the new file.
            MergePick::Minor
        }
    }
}
General Comments 0
You need to be logged in to leave comments. Login now