##// END OF EJS Templates
copies-rust: start recording overwrite as they happens...
marmoute -
r46770:fce2f20a default
parent child Browse files
Show More
@@ -1,758 +1,767 b''
1 use crate::utils::hg_path::HgPath;
1 use crate::utils::hg_path::HgPath;
2 use crate::utils::hg_path::HgPathBuf;
2 use crate::utils::hg_path::HgPathBuf;
3 use crate::Revision;
3 use crate::Revision;
4 use crate::NULL_REVISION;
4 use crate::NULL_REVISION;
5
5
6 use im_rc::ordmap::DiffItem;
6 use im_rc::ordmap::DiffItem;
7 use im_rc::ordmap::Entry;
7 use im_rc::ordmap::Entry;
8 use im_rc::ordmap::OrdMap;
8 use im_rc::ordmap::OrdMap;
9
9
10 use std::cmp::Ordering;
10 use std::cmp::Ordering;
11 use std::collections::HashMap;
11 use std::collections::HashMap;
12 use std::convert::TryInto;
12 use std::convert::TryInto;
13
13
14 pub type PathCopies = HashMap<HgPathBuf, HgPathBuf>;
14 pub type PathCopies = HashMap<HgPathBuf, HgPathBuf>;
15
15
16 type PathToken = usize;
16 type PathToken = usize;
17
17
18 #[derive(Clone, Debug, PartialEq, Copy)]
18 #[derive(Clone, Debug, PartialEq, Copy)]
19 struct TimeStampedPathCopy {
19 struct TimeStampedPathCopy {
20 /// revision at which the copy information was added
20 /// revision at which the copy information was added
21 rev: Revision,
21 rev: Revision,
22 /// the copy source, (Set to None in case of deletion of the associated
22 /// the copy source, (Set to None in case of deletion of the associated
23 /// key)
23 /// key)
24 path: Option<PathToken>,
24 path: Option<PathToken>,
25 }
25 }
26
26
27 /// maps CopyDestination to Copy Source (+ a "timestamp" for the operation)
27 /// maps CopyDestination to Copy Source (+ a "timestamp" for the operation)
28 type TimeStampedPathCopies = OrdMap<PathToken, TimeStampedPathCopy>;
28 type TimeStampedPathCopies = OrdMap<PathToken, TimeStampedPathCopy>;
29
29
30 /// hold parent 1, parent 2 and relevant files actions.
30 /// hold parent 1, parent 2 and relevant files actions.
31 pub type RevInfo<'a> = (Revision, Revision, ChangedFiles<'a>);
31 pub type RevInfo<'a> = (Revision, Revision, ChangedFiles<'a>);
32
32
/// represent the files affected by a changesets
///
/// This hold a subset of mercurial.metadata.ChangingFiles as we do not need
/// all the data categories tracked by it.
pub struct ChangedFiles<'a> {
    /// number of entries in `index`
    nb_items: u32,
    /// fixed-size index records (see `ChangedFiles::ENTRY_SIZE`)
    index: &'a [u8],
    /// concatenated filename bytes, addressed by offsets stored in `index`
    data: &'a [u8],
}
44
44
45 /// Represent active changes that affect the copy tracing.
45 /// Represent active changes that affect the copy tracing.
46 enum Action<'a> {
46 enum Action<'a> {
47 /// The parent ? children edge is removing a file
47 /// The parent ? children edge is removing a file
48 ///
48 ///
49 /// (actually, this could be the edge from the other parent, but it does
49 /// (actually, this could be the edge from the other parent, but it does
50 /// not matters)
50 /// not matters)
51 Removed(&'a HgPath),
51 Removed(&'a HgPath),
52 /// The parent ? children edge introduce copy information between (dest,
52 /// The parent ? children edge introduce copy information between (dest,
53 /// source)
53 /// source)
54 Copied(&'a HgPath, &'a HgPath),
54 Copied(&'a HgPath, &'a HgPath),
55 }
55 }
56
56
/// The "special" situations a file can be in during a merge.
///
/// See mercurial/metadata.py for details on these values.
#[derive(PartialEq)]
enum MergeCase {
    /// file had history on both side that needed to be merged
    Merged,
    /// file was candidate for deletion, but survived the merge
    Salvaged,
    /// neither of the two cases above
    Normal,
}
69
69
70 type FileChange<'a> = (u8, &'a HgPath, &'a HgPath);
70 type FileChange<'a> = (u8, &'a HgPath, &'a HgPath);
71
71
/// Shared empty slice used by `ChangedFiles::new_empty`.
const EMPTY: &[u8] = b"";

// Low two bits of the flag byte select which parent the copy comes from
// (compared against `P1_COPY` / `P2_COPY` after masking).
const COPY_MASK: u8 = 3;
const P1_COPY: u8 = 2;
const P2_COPY: u8 = 3;
// Bits 2-4 of the flag byte encode the action / merge case
// (compared against `REMOVED` / `MERGED` / `SALVAGED` after masking).
const ACTION_MASK: u8 = 28;
const REMOVED: u8 = 12;
const MERGED: u8 = 8;
const SALVAGED: u8 = 16;
80
80
81 impl<'a> ChangedFiles<'a> {
81 impl<'a> ChangedFiles<'a> {
82 const INDEX_START: usize = 4;
82 const INDEX_START: usize = 4;
83 const ENTRY_SIZE: u32 = 9;
83 const ENTRY_SIZE: u32 = 9;
84 const FILENAME_START: u32 = 1;
84 const FILENAME_START: u32 = 1;
85 const COPY_SOURCE_START: u32 = 5;
85 const COPY_SOURCE_START: u32 = 5;
86
86
87 pub fn new(data: &'a [u8]) -> Self {
87 pub fn new(data: &'a [u8]) -> Self {
88 assert!(
88 assert!(
89 data.len() >= 4,
89 data.len() >= 4,
90 "data size ({}) is too small to contain the header (4)",
90 "data size ({}) is too small to contain the header (4)",
91 data.len()
91 data.len()
92 );
92 );
93 let nb_items_raw: [u8; 4] = (&data[0..=3])
93 let nb_items_raw: [u8; 4] = (&data[0..=3])
94 .try_into()
94 .try_into()
95 .expect("failed to turn 4 bytes into 4 bytes");
95 .expect("failed to turn 4 bytes into 4 bytes");
96 let nb_items = u32::from_be_bytes(nb_items_raw);
96 let nb_items = u32::from_be_bytes(nb_items_raw);
97
97
98 let index_size = (nb_items * Self::ENTRY_SIZE) as usize;
98 let index_size = (nb_items * Self::ENTRY_SIZE) as usize;
99 let index_end = Self::INDEX_START + index_size;
99 let index_end = Self::INDEX_START + index_size;
100
100
101 assert!(
101 assert!(
102 data.len() >= index_end,
102 data.len() >= index_end,
103 "data size ({}) is too small to fit the index_data ({})",
103 "data size ({}) is too small to fit the index_data ({})",
104 data.len(),
104 data.len(),
105 index_end
105 index_end
106 );
106 );
107
107
108 let ret = ChangedFiles {
108 let ret = ChangedFiles {
109 nb_items,
109 nb_items,
110 index: &data[Self::INDEX_START..index_end],
110 index: &data[Self::INDEX_START..index_end],
111 data: &data[index_end..],
111 data: &data[index_end..],
112 };
112 };
113 let max_data = ret.filename_end(nb_items - 1) as usize;
113 let max_data = ret.filename_end(nb_items - 1) as usize;
114 assert!(
114 assert!(
115 ret.data.len() >= max_data,
115 ret.data.len() >= max_data,
116 "data size ({}) is too small to fit all data ({})",
116 "data size ({}) is too small to fit all data ({})",
117 data.len(),
117 data.len(),
118 index_end + max_data
118 index_end + max_data
119 );
119 );
120 ret
120 ret
121 }
121 }
122
122
123 pub fn new_empty() -> Self {
123 pub fn new_empty() -> Self {
124 ChangedFiles {
124 ChangedFiles {
125 nb_items: 0,
125 nb_items: 0,
126 index: EMPTY,
126 index: EMPTY,
127 data: EMPTY,
127 data: EMPTY,
128 }
128 }
129 }
129 }
130
130
131 /// internal function to return an individual entry at a given index
131 /// internal function to return an individual entry at a given index
132 fn entry(&'a self, idx: u32) -> FileChange<'a> {
132 fn entry(&'a self, idx: u32) -> FileChange<'a> {
133 if idx >= self.nb_items {
133 if idx >= self.nb_items {
134 panic!(
134 panic!(
135 "index for entry is higher that the number of file {} >= {}",
135 "index for entry is higher that the number of file {} >= {}",
136 idx, self.nb_items
136 idx, self.nb_items
137 )
137 )
138 }
138 }
139 let flags = self.flags(idx);
139 let flags = self.flags(idx);
140 let filename = self.filename(idx);
140 let filename = self.filename(idx);
141 let copy_idx = self.copy_idx(idx);
141 let copy_idx = self.copy_idx(idx);
142 let copy_source = self.filename(copy_idx);
142 let copy_source = self.filename(copy_idx);
143 (flags, filename, copy_source)
143 (flags, filename, copy_source)
144 }
144 }
145
145
146 /// internal function to return the filename of the entry at a given index
146 /// internal function to return the filename of the entry at a given index
147 fn filename(&self, idx: u32) -> &HgPath {
147 fn filename(&self, idx: u32) -> &HgPath {
148 let filename_start;
148 let filename_start;
149 if idx == 0 {
149 if idx == 0 {
150 filename_start = 0;
150 filename_start = 0;
151 } else {
151 } else {
152 filename_start = self.filename_end(idx - 1)
152 filename_start = self.filename_end(idx - 1)
153 }
153 }
154 let filename_end = self.filename_end(idx);
154 let filename_end = self.filename_end(idx);
155 let filename_start = filename_start as usize;
155 let filename_start = filename_start as usize;
156 let filename_end = filename_end as usize;
156 let filename_end = filename_end as usize;
157 HgPath::new(&self.data[filename_start..filename_end])
157 HgPath::new(&self.data[filename_start..filename_end])
158 }
158 }
159
159
160 /// internal function to return the flag field of the entry at a given
160 /// internal function to return the flag field of the entry at a given
161 /// index
161 /// index
162 fn flags(&self, idx: u32) -> u8 {
162 fn flags(&self, idx: u32) -> u8 {
163 let idx = idx as usize;
163 let idx = idx as usize;
164 self.index[idx * (Self::ENTRY_SIZE as usize)]
164 self.index[idx * (Self::ENTRY_SIZE as usize)]
165 }
165 }
166
166
167 /// internal function to return the end of a filename part at a given index
167 /// internal function to return the end of a filename part at a given index
168 fn filename_end(&self, idx: u32) -> u32 {
168 fn filename_end(&self, idx: u32) -> u32 {
169 let start = (idx * Self::ENTRY_SIZE) + Self::FILENAME_START;
169 let start = (idx * Self::ENTRY_SIZE) + Self::FILENAME_START;
170 let end = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
170 let end = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
171 let start = start as usize;
171 let start = start as usize;
172 let end = end as usize;
172 let end = end as usize;
173 let raw = (&self.index[start..end])
173 let raw = (&self.index[start..end])
174 .try_into()
174 .try_into()
175 .expect("failed to turn 4 bytes into 4 bytes");
175 .expect("failed to turn 4 bytes into 4 bytes");
176 u32::from_be_bytes(raw)
176 u32::from_be_bytes(raw)
177 }
177 }
178
178
179 /// internal function to return index of the copy source of the entry at a
179 /// internal function to return index of the copy source of the entry at a
180 /// given index
180 /// given index
181 fn copy_idx(&self, idx: u32) -> u32 {
181 fn copy_idx(&self, idx: u32) -> u32 {
182 let start = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
182 let start = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
183 let end = (idx + 1) * Self::ENTRY_SIZE;
183 let end = (idx + 1) * Self::ENTRY_SIZE;
184 let start = start as usize;
184 let start = start as usize;
185 let end = end as usize;
185 let end = end as usize;
186 let raw = (&self.index[start..end])
186 let raw = (&self.index[start..end])
187 .try_into()
187 .try_into()
188 .expect("failed to turn 4 bytes into 4 bytes");
188 .expect("failed to turn 4 bytes into 4 bytes");
189 u32::from_be_bytes(raw)
189 u32::from_be_bytes(raw)
190 }
190 }
191
191
192 /// Return an iterator over all the `Action` in this instance.
192 /// Return an iterator over all the `Action` in this instance.
193 fn iter_actions(&self, parent: Parent) -> ActionsIterator {
193 fn iter_actions(&self, parent: Parent) -> ActionsIterator {
194 ActionsIterator {
194 ActionsIterator {
195 changes: &self,
195 changes: &self,
196 parent: parent,
196 parent: parent,
197 current: 0,
197 current: 0,
198 }
198 }
199 }
199 }
200
200
201 /// return the MergeCase value associated with a filename
201 /// return the MergeCase value associated with a filename
202 fn get_merge_case(&self, path: &HgPath) -> MergeCase {
202 fn get_merge_case(&self, path: &HgPath) -> MergeCase {
203 if self.nb_items == 0 {
203 if self.nb_items == 0 {
204 return MergeCase::Normal;
204 return MergeCase::Normal;
205 }
205 }
206 let mut low_part = 0;
206 let mut low_part = 0;
207 let mut high_part = self.nb_items;
207 let mut high_part = self.nb_items;
208
208
209 while low_part < high_part {
209 while low_part < high_part {
210 let cursor = (low_part + high_part - 1) / 2;
210 let cursor = (low_part + high_part - 1) / 2;
211 let (flags, filename, _source) = self.entry(cursor);
211 let (flags, filename, _source) = self.entry(cursor);
212 match path.cmp(filename) {
212 match path.cmp(filename) {
213 Ordering::Less => low_part = cursor + 1,
213 Ordering::Less => low_part = cursor + 1,
214 Ordering::Greater => high_part = cursor,
214 Ordering::Greater => high_part = cursor,
215 Ordering::Equal => {
215 Ordering::Equal => {
216 return match flags & ACTION_MASK {
216 return match flags & ACTION_MASK {
217 MERGED => MergeCase::Merged,
217 MERGED => MergeCase::Merged,
218 SALVAGED => MergeCase::Salvaged,
218 SALVAGED => MergeCase::Salvaged,
219 _ => MergeCase::Normal,
219 _ => MergeCase::Normal,
220 };
220 };
221 }
221 }
222 }
222 }
223 }
223 }
224 MergeCase::Normal
224 MergeCase::Normal
225 }
225 }
226 }
226 }
227
227
228 /// A struct responsible for answering "is X ancestors of Y" quickly
228 /// A struct responsible for answering "is X ancestors of Y" quickly
229 ///
229 ///
230 /// The structure will delegate ancestors call to a callback, and cache the
230 /// The structure will delegate ancestors call to a callback, and cache the
231 /// result.
231 /// result.
232 #[derive(Debug)]
232 #[derive(Debug)]
233 struct AncestorOracle<'a, A: Fn(Revision, Revision) -> bool> {
233 struct AncestorOracle<'a, A: Fn(Revision, Revision) -> bool> {
234 inner: &'a A,
234 inner: &'a A,
235 pairs: HashMap<(Revision, Revision), bool>,
235 pairs: HashMap<(Revision, Revision), bool>,
236 }
236 }
237
237
238 impl<'a, A: Fn(Revision, Revision) -> bool> AncestorOracle<'a, A> {
238 impl<'a, A: Fn(Revision, Revision) -> bool> AncestorOracle<'a, A> {
239 fn new(func: &'a A) -> Self {
239 fn new(func: &'a A) -> Self {
240 Self {
240 Self {
241 inner: func,
241 inner: func,
242 pairs: HashMap::default(),
242 pairs: HashMap::default(),
243 }
243 }
244 }
244 }
245
245
246 fn record_overwrite(&mut self, anc: Revision, desc: Revision) {
247 self.pairs.insert((anc, desc), true);
248 }
249
246 /// returns `true` if `anc` is an ancestors of `desc`, `false` otherwise
250 /// returns `true` if `anc` is an ancestors of `desc`, `false` otherwise
247 fn is_overwrite(&mut self, anc: Revision, desc: Revision) -> bool {
251 fn is_overwrite(&mut self, anc: Revision, desc: Revision) -> bool {
248 if anc > desc {
252 if anc > desc {
249 false
253 false
250 } else if anc == desc {
254 } else if anc == desc {
251 true
255 true
252 } else {
256 } else {
253 if let Some(b) = self.pairs.get(&(anc, desc)) {
257 if let Some(b) = self.pairs.get(&(anc, desc)) {
254 *b
258 *b
255 } else {
259 } else {
256 let b = (self.inner)(anc, desc);
260 let b = (self.inner)(anc, desc);
257 self.pairs.insert((anc, desc), b);
261 self.pairs.insert((anc, desc), b);
258 b
262 b
259 }
263 }
260 }
264 }
261 }
265 }
262 }
266 }
263
267
264 struct ActionsIterator<'a> {
268 struct ActionsIterator<'a> {
265 changes: &'a ChangedFiles<'a>,
269 changes: &'a ChangedFiles<'a>,
266 parent: Parent,
270 parent: Parent,
267 current: u32,
271 current: u32,
268 }
272 }
269
273
270 impl<'a> Iterator for ActionsIterator<'a> {
274 impl<'a> Iterator for ActionsIterator<'a> {
271 type Item = Action<'a>;
275 type Item = Action<'a>;
272
276
273 fn next(&mut self) -> Option<Action<'a>> {
277 fn next(&mut self) -> Option<Action<'a>> {
274 let copy_flag = match self.parent {
278 let copy_flag = match self.parent {
275 Parent::FirstParent => P1_COPY,
279 Parent::FirstParent => P1_COPY,
276 Parent::SecondParent => P2_COPY,
280 Parent::SecondParent => P2_COPY,
277 };
281 };
278 while self.current < self.changes.nb_items {
282 while self.current < self.changes.nb_items {
279 let (flags, file, source) = self.changes.entry(self.current);
283 let (flags, file, source) = self.changes.entry(self.current);
280 self.current += 1;
284 self.current += 1;
281 if (flags & ACTION_MASK) == REMOVED {
285 if (flags & ACTION_MASK) == REMOVED {
282 return Some(Action::Removed(file));
286 return Some(Action::Removed(file));
283 }
287 }
284 let copy = flags & COPY_MASK;
288 let copy = flags & COPY_MASK;
285 if copy == copy_flag {
289 if copy == copy_flag {
286 return Some(Action::Copied(file, source));
290 return Some(Action::Copied(file, source));
287 }
291 }
288 }
292 }
289 return None;
293 return None;
290 }
294 }
291 }
295 }
292
296
/// A small struct whose purpose is to ensure lifetime of bytes referenced in
/// ChangedFiles
///
/// It is passed to the RevInfoMaker callback who can assign any necessary
/// content to the `data` attribute. The copy tracing code is responsible for
/// keeping the DataHolder alive at least as long as the ChangedFiles object.
pub struct DataHolder<D> {
    /// RevInfoMaker callback should assign data referenced by the
    /// ChangedFiles struct it return to this attribute. The DataHolder
    /// lifetime will be at least as long as the ChangedFiles one.
    pub data: Option<D>,
}
305
309
306 pub type RevInfoMaker<'a, D> =
310 pub type RevInfoMaker<'a, D> =
307 Box<dyn for<'r> Fn(Revision, &'r mut DataHolder<D>) -> RevInfo<'r> + 'a>;
311 Box<dyn for<'r> Fn(Revision, &'r mut DataHolder<D>) -> RevInfo<'r> + 'a>;
308
312
/// enum used to carry information about the parent → child currently processed
#[derive(Copy, Clone, Debug)]
enum Parent {
    /// The `p1(x) → x` edge
    FirstParent,
    /// The `p2(x) → x` edge
    SecondParent,
}
317
321
318 /// A small "tokenizer" responsible of turning full HgPath into lighter
322 /// A small "tokenizer" responsible of turning full HgPath into lighter
319 /// PathToken
323 /// PathToken
320 ///
324 ///
321 /// Dealing with small object, like integer is much faster, so HgPath input are
325 /// Dealing with small object, like integer is much faster, so HgPath input are
322 /// turned into integer "PathToken" and converted back in the end.
326 /// turned into integer "PathToken" and converted back in the end.
323 #[derive(Clone, Debug, Default)]
327 #[derive(Clone, Debug, Default)]
324 struct TwoWayPathMap {
328 struct TwoWayPathMap {
325 token: HashMap<HgPathBuf, PathToken>,
329 token: HashMap<HgPathBuf, PathToken>,
326 path: Vec<HgPathBuf>,
330 path: Vec<HgPathBuf>,
327 }
331 }
328
332
329 impl TwoWayPathMap {
333 impl TwoWayPathMap {
330 fn tokenize(&mut self, path: &HgPath) -> PathToken {
334 fn tokenize(&mut self, path: &HgPath) -> PathToken {
331 match self.token.get(path) {
335 match self.token.get(path) {
332 Some(a) => *a,
336 Some(a) => *a,
333 None => {
337 None => {
334 let a = self.token.len();
338 let a = self.token.len();
335 let buf = path.to_owned();
339 let buf = path.to_owned();
336 self.path.push(buf.clone());
340 self.path.push(buf.clone());
337 self.token.insert(buf, a);
341 self.token.insert(buf, a);
338 a
342 a
339 }
343 }
340 }
344 }
341 }
345 }
342
346
343 fn untokenize(&self, token: PathToken) -> &HgPathBuf {
347 fn untokenize(&self, token: PathToken) -> &HgPathBuf {
344 assert!(token < self.path.len(), format!("Unknown token: {}", token));
348 assert!(token < self.path.len(), format!("Unknown token: {}", token));
345 &self.path[token]
349 &self.path[token]
346 }
350 }
347 }
351 }
348
352
349 /// Same as mercurial.copies._combine_changeset_copies, but in Rust.
353 /// Same as mercurial.copies._combine_changeset_copies, but in Rust.
350 ///
354 ///
351 /// Arguments are:
355 /// Arguments are:
352 ///
356 ///
353 /// revs: all revisions to be considered
357 /// revs: all revisions to be considered
354 /// children: a {parent ? [childrens]} mapping
358 /// children: a {parent ? [childrens]} mapping
355 /// target_rev: the final revision we are combining copies to
359 /// target_rev: the final revision we are combining copies to
356 /// rev_info(rev): callback to get revision information:
360 /// rev_info(rev): callback to get revision information:
357 /// * first parent
361 /// * first parent
358 /// * second parent
362 /// * second parent
359 /// * ChangedFiles
363 /// * ChangedFiles
360 /// isancestors(low_rev, high_rev): callback to check if a revision is an
364 /// isancestors(low_rev, high_rev): callback to check if a revision is an
361 /// ancestor of another
365 /// ancestor of another
362 pub fn combine_changeset_copies<A: Fn(Revision, Revision) -> bool, D>(
366 pub fn combine_changeset_copies<A: Fn(Revision, Revision) -> bool, D>(
363 revs: Vec<Revision>,
367 revs: Vec<Revision>,
364 mut children_count: HashMap<Revision, usize>,
368 mut children_count: HashMap<Revision, usize>,
365 target_rev: Revision,
369 target_rev: Revision,
366 rev_info: RevInfoMaker<D>,
370 rev_info: RevInfoMaker<D>,
367 is_ancestor: &A,
371 is_ancestor: &A,
368 ) -> PathCopies {
372 ) -> PathCopies {
369 let mut all_copies = HashMap::new();
373 let mut all_copies = HashMap::new();
370 let mut oracle = AncestorOracle::new(is_ancestor);
374 let mut oracle = AncestorOracle::new(is_ancestor);
371
375
372 let mut path_map = TwoWayPathMap::default();
376 let mut path_map = TwoWayPathMap::default();
373
377
374 for rev in revs {
378 for rev in revs {
375 let mut d: DataHolder<D> = DataHolder { data: None };
379 let mut d: DataHolder<D> = DataHolder { data: None };
376 let (p1, p2, changes) = rev_info(rev, &mut d);
380 let (p1, p2, changes) = rev_info(rev, &mut d);
377
381
378 // We will chain the copies information accumulated for the parent with
382 // We will chain the copies information accumulated for the parent with
379 // the individual copies information the curent revision. Creating a
383 // the individual copies information the curent revision. Creating a
380 // new TimeStampedPath for each `rev` → `children` vertex.
384 // new TimeStampedPath for each `rev` → `children` vertex.
381 let mut copies: Option<TimeStampedPathCopies> = None;
385 let mut copies: Option<TimeStampedPathCopies> = None;
382 if p1 != NULL_REVISION {
386 if p1 != NULL_REVISION {
383 // Retrieve data computed in a previous iteration
387 // Retrieve data computed in a previous iteration
384 let parent_copies = get_and_clean_parent_copies(
388 let parent_copies = get_and_clean_parent_copies(
385 &mut all_copies,
389 &mut all_copies,
386 &mut children_count,
390 &mut children_count,
387 p1,
391 p1,
388 );
392 );
389 if let Some(parent_copies) = parent_copies {
393 if let Some(parent_copies) = parent_copies {
390 // combine it with data for that revision
394 // combine it with data for that revision
391 let vertex_copies = add_from_changes(
395 let vertex_copies = add_from_changes(
392 &mut path_map,
396 &mut path_map,
397 &mut oracle,
393 &parent_copies,
398 &parent_copies,
394 &changes,
399 &changes,
395 Parent::FirstParent,
400 Parent::FirstParent,
396 rev,
401 rev,
397 );
402 );
398 // keep that data around for potential later combination
403 // keep that data around for potential later combination
399 copies = Some(vertex_copies);
404 copies = Some(vertex_copies);
400 }
405 }
401 }
406 }
402 if p2 != NULL_REVISION {
407 if p2 != NULL_REVISION {
403 // Retrieve data computed in a previous iteration
408 // Retrieve data computed in a previous iteration
404 let parent_copies = get_and_clean_parent_copies(
409 let parent_copies = get_and_clean_parent_copies(
405 &mut all_copies,
410 &mut all_copies,
406 &mut children_count,
411 &mut children_count,
407 p2,
412 p2,
408 );
413 );
409 if let Some(parent_copies) = parent_copies {
414 if let Some(parent_copies) = parent_copies {
410 // combine it with data for that revision
415 // combine it with data for that revision
411 let vertex_copies = add_from_changes(
416 let vertex_copies = add_from_changes(
412 &mut path_map,
417 &mut path_map,
418 &mut oracle,
413 &parent_copies,
419 &parent_copies,
414 &changes,
420 &changes,
415 Parent::SecondParent,
421 Parent::SecondParent,
416 rev,
422 rev,
417 );
423 );
418
424
419 copies = match copies {
425 copies = match copies {
420 None => Some(vertex_copies),
426 None => Some(vertex_copies),
421 // Merge has two parents needs to combines their copy
427 // Merge has two parents needs to combines their copy
422 // information.
428 // information.
423 //
429 //
424 // If we got data from both parents, We need to combine
430 // If we got data from both parents, We need to combine
425 // them.
431 // them.
426 Some(copies) => Some(merge_copies_dict(
432 Some(copies) => Some(merge_copies_dict(
427 &path_map,
433 &path_map,
428 vertex_copies,
434 vertex_copies,
429 copies,
435 copies,
430 &changes,
436 &changes,
431 &mut oracle,
437 &mut oracle,
432 )),
438 )),
433 };
439 };
434 }
440 }
435 }
441 }
436 match copies {
442 match copies {
437 Some(copies) => {
443 Some(copies) => {
438 all_copies.insert(rev, copies);
444 all_copies.insert(rev, copies);
439 }
445 }
440 _ => {}
446 _ => {}
441 }
447 }
442 }
448 }
443
449
444 // Drop internal information (like the timestamp) and return the final
450 // Drop internal information (like the timestamp) and return the final
445 // mapping.
451 // mapping.
446 let tt_result = all_copies
452 let tt_result = all_copies
447 .remove(&target_rev)
453 .remove(&target_rev)
448 .expect("target revision was not processed");
454 .expect("target revision was not processed");
449 let mut result = PathCopies::default();
455 let mut result = PathCopies::default();
450 for (dest, tt_source) in tt_result {
456 for (dest, tt_source) in tt_result {
451 if let Some(path) = tt_source.path {
457 if let Some(path) = tt_source.path {
452 let path_dest = path_map.untokenize(dest).to_owned();
458 let path_dest = path_map.untokenize(dest).to_owned();
453 let path_path = path_map.untokenize(path).to_owned();
459 let path_path = path_map.untokenize(path).to_owned();
454 result.insert(path_dest, path_path);
460 result.insert(path_dest, path_path);
455 }
461 }
456 }
462 }
457 result
463 result
458 }
464 }
459
465
460 /// fetch previous computed information
466 /// fetch previous computed information
461 ///
467 ///
462 /// If no other children are expected to need this information, we drop it from
468 /// If no other children are expected to need this information, we drop it from
463 /// the cache.
469 /// the cache.
464 ///
470 ///
465 /// If parent is not part of the set we are expected to walk, return None.
471 /// If parent is not part of the set we are expected to walk, return None.
466 fn get_and_clean_parent_copies(
472 fn get_and_clean_parent_copies(
467 all_copies: &mut HashMap<Revision, TimeStampedPathCopies>,
473 all_copies: &mut HashMap<Revision, TimeStampedPathCopies>,
468 children_count: &mut HashMap<Revision, usize>,
474 children_count: &mut HashMap<Revision, usize>,
469 parent_rev: Revision,
475 parent_rev: Revision,
470 ) -> Option<TimeStampedPathCopies> {
476 ) -> Option<TimeStampedPathCopies> {
471 let count = children_count.get_mut(&parent_rev)?;
477 let count = children_count.get_mut(&parent_rev)?;
472 *count -= 1;
478 *count -= 1;
473 if *count == 0 {
479 if *count == 0 {
474 match all_copies.remove(&parent_rev) {
480 match all_copies.remove(&parent_rev) {
475 Some(c) => Some(c),
481 Some(c) => Some(c),
476 None => Some(TimeStampedPathCopies::default()),
482 None => Some(TimeStampedPathCopies::default()),
477 }
483 }
478 } else {
484 } else {
479 match all_copies.get(&parent_rev) {
485 match all_copies.get(&parent_rev) {
480 Some(c) => Some(c.clone()),
486 Some(c) => Some(c.clone()),
481 None => Some(TimeStampedPathCopies::default()),
487 None => Some(TimeStampedPathCopies::default()),
482 }
488 }
483 }
489 }
484 }
490 }
485
491
486 /// Combine ChangedFiles with some existing PathCopies information and return
492 /// Combine ChangedFiles with some existing PathCopies information and return
487 /// the result
493 /// the result
488 fn add_from_changes(
494 fn add_from_changes<A: Fn(Revision, Revision) -> bool>(
489 path_map: &mut TwoWayPathMap,
495 path_map: &mut TwoWayPathMap,
496 oracle: &mut AncestorOracle<A>,
490 base_copies: &TimeStampedPathCopies,
497 base_copies: &TimeStampedPathCopies,
491 changes: &ChangedFiles,
498 changes: &ChangedFiles,
492 parent: Parent,
499 parent: Parent,
493 current_rev: Revision,
500 current_rev: Revision,
494 ) -> TimeStampedPathCopies {
501 ) -> TimeStampedPathCopies {
495 let mut copies = base_copies.clone();
502 let mut copies = base_copies.clone();
496 for action in changes.iter_actions(parent) {
503 for action in changes.iter_actions(parent) {
497 match action {
504 match action {
498 Action::Copied(path_dest, path_source) => {
505 Action::Copied(path_dest, path_source) => {
499 let dest = path_map.tokenize(path_dest);
506 let dest = path_map.tokenize(path_dest);
500 let source = path_map.tokenize(path_source);
507 let source = path_map.tokenize(path_source);
501 let entry;
508 let entry;
502 if let Some(v) = base_copies.get(&source) {
509 if let Some(v) = base_copies.get(&source) {
503 entry = match &v.path {
510 entry = match &v.path {
504 Some(path) => Some((*(path)).to_owned()),
511 Some(path) => Some((*(path)).to_owned()),
505 None => Some(source.to_owned()),
512 None => Some(source.to_owned()),
506 }
513 }
507 } else {
514 } else {
508 entry = Some(source.to_owned());
515 entry = Some(source.to_owned());
509 }
516 }
510 // Each new entry is introduced by the children, we
517 // Each new entry is introduced by the children, we
511 // record this information as we will need it to take
518 // record this information as we will need it to take
512 // the right decision when merging conflicting copy
519 // the right decision when merging conflicting copy
513 // information. See merge_copies_dict for details.
520 // information. See merge_copies_dict for details.
514 match copies.entry(dest) {
521 match copies.entry(dest) {
515 Entry::Vacant(slot) => {
522 Entry::Vacant(slot) => {
516 let ttpc = TimeStampedPathCopy {
523 let ttpc = TimeStampedPathCopy {
517 rev: current_rev,
524 rev: current_rev,
518 path: entry,
525 path: entry,
519 };
526 };
520 slot.insert(ttpc);
527 slot.insert(ttpc);
521 }
528 }
522 Entry::Occupied(mut slot) => {
529 Entry::Occupied(mut slot) => {
523 let mut ttpc = slot.get_mut();
530 let mut ttpc = slot.get_mut();
531 oracle.record_overwrite(ttpc.rev, current_rev);
524 ttpc.rev = current_rev;
532 ttpc.rev = current_rev;
525 ttpc.path = entry;
533 ttpc.path = entry;
526 }
534 }
527 }
535 }
528 }
536 }
529 Action::Removed(deleted_path) => {
537 Action::Removed(deleted_path) => {
530 // We must drop copy information for removed file.
538 // We must drop copy information for removed file.
531 //
539 //
532 // We need to explicitly record them as dropped to
540 // We need to explicitly record them as dropped to
533 // propagate this information when merging two
541 // propagate this information when merging two
534 // TimeStampedPathCopies object.
542 // TimeStampedPathCopies object.
535 let deleted = path_map.tokenize(deleted_path);
543 let deleted = path_map.tokenize(deleted_path);
536 copies.entry(deleted).and_modify(|old| {
544 copies.entry(deleted).and_modify(|old| {
545 oracle.record_overwrite(old.rev, current_rev);
537 old.rev = current_rev;
546 old.rev = current_rev;
538 old.path = None;
547 old.path = None;
539 });
548 });
540 }
549 }
541 }
550 }
542 }
551 }
543 copies
552 copies
544 }
553 }
545
554
546 /// merge two copies-mapping together, minor and major
555 /// merge two copies-mapping together, minor and major
547 ///
556 ///
548 /// In case of conflict, value from "major" will be picked, unless in some
557 /// In case of conflict, value from "major" will be picked, unless in some
549 /// cases. See inline documentation for details.
558 /// cases. See inline documentation for details.
550 fn merge_copies_dict<A: Fn(Revision, Revision) -> bool>(
559 fn merge_copies_dict<A: Fn(Revision, Revision) -> bool>(
551 path_map: &TwoWayPathMap,
560 path_map: &TwoWayPathMap,
552 mut minor: TimeStampedPathCopies,
561 mut minor: TimeStampedPathCopies,
553 mut major: TimeStampedPathCopies,
562 mut major: TimeStampedPathCopies,
554 changes: &ChangedFiles,
563 changes: &ChangedFiles,
555 oracle: &mut AncestorOracle<A>,
564 oracle: &mut AncestorOracle<A>,
556 ) -> TimeStampedPathCopies {
565 ) -> TimeStampedPathCopies {
557 // This closure exist as temporary help while multiple developper are
566 // This closure exist as temporary help while multiple developper are
558 // actively working on this code. Feel free to re-inline it once this
567 // actively working on this code. Feel free to re-inline it once this
559 // code is more settled.
568 // code is more settled.
560 let mut cmp_value =
569 let mut cmp_value =
561 |dest: &PathToken,
570 |dest: &PathToken,
562 src_minor: &TimeStampedPathCopy,
571 src_minor: &TimeStampedPathCopy,
563 src_major: &TimeStampedPathCopy| {
572 src_major: &TimeStampedPathCopy| {
564 compare_value(
573 compare_value(
565 path_map, changes, oracle, dest, src_minor, src_major,
574 path_map, changes, oracle, dest, src_minor, src_major,
566 )
575 )
567 };
576 };
568 if minor.is_empty() {
577 if minor.is_empty() {
569 major
578 major
570 } else if major.is_empty() {
579 } else if major.is_empty() {
571 minor
580 minor
572 } else if minor.len() * 2 < major.len() {
581 } else if minor.len() * 2 < major.len() {
573 // Lets says we are merging two TimeStampedPathCopies instance A and B.
582 // Lets says we are merging two TimeStampedPathCopies instance A and B.
574 //
583 //
575 // If A contains N items, the merge result will never contains more
584 // If A contains N items, the merge result will never contains more
576 // than N values differents than the one in A
585 // than N values differents than the one in A
577 //
586 //
578 // If B contains M items, with M > N, the merge result will always
587 // If B contains M items, with M > N, the merge result will always
579 // result in a minimum of M - N value differents than the on in
588 // result in a minimum of M - N value differents than the on in
580 // A
589 // A
581 //
590 //
582 // As a result, if N < (M-N), we know that simply iterating over A will
591 // As a result, if N < (M-N), we know that simply iterating over A will
583 // yield less difference than iterating over the difference
592 // yield less difference than iterating over the difference
584 // between A and B.
593 // between A and B.
585 //
594 //
586 // This help performance a lot in case were a tiny
595 // This help performance a lot in case were a tiny
587 // TimeStampedPathCopies is merged with a much larger one.
596 // TimeStampedPathCopies is merged with a much larger one.
588 for (dest, src_minor) in minor {
597 for (dest, src_minor) in minor {
589 let src_major = major.get(&dest);
598 let src_major = major.get(&dest);
590 match src_major {
599 match src_major {
591 None => major.insert(dest, src_minor),
600 None => major.insert(dest, src_minor),
592 Some(src_major) => {
601 Some(src_major) => {
593 match cmp_value(&dest, &src_minor, src_major) {
602 match cmp_value(&dest, &src_minor, src_major) {
594 MergePick::Any | MergePick::Major => None,
603 MergePick::Any | MergePick::Major => None,
595 MergePick::Minor => major.insert(dest, src_minor),
604 MergePick::Minor => major.insert(dest, src_minor),
596 }
605 }
597 }
606 }
598 };
607 };
599 }
608 }
600 major
609 major
601 } else if major.len() * 2 < minor.len() {
610 } else if major.len() * 2 < minor.len() {
602 // This use the same rational than the previous block.
611 // This use the same rational than the previous block.
603 // (Check previous block documentation for details.)
612 // (Check previous block documentation for details.)
604 for (dest, src_major) in major {
613 for (dest, src_major) in major {
605 let src_minor = minor.get(&dest);
614 let src_minor = minor.get(&dest);
606 match src_minor {
615 match src_minor {
607 None => minor.insert(dest, src_major),
616 None => minor.insert(dest, src_major),
608 Some(src_minor) => {
617 Some(src_minor) => {
609 match cmp_value(&dest, src_minor, &src_major) {
618 match cmp_value(&dest, src_minor, &src_major) {
610 MergePick::Any | MergePick::Minor => None,
619 MergePick::Any | MergePick::Minor => None,
611 MergePick::Major => minor.insert(dest, src_major),
620 MergePick::Major => minor.insert(dest, src_major),
612 }
621 }
613 }
622 }
614 };
623 };
615 }
624 }
616 minor
625 minor
617 } else {
626 } else {
618 let mut override_minor = Vec::new();
627 let mut override_minor = Vec::new();
619 let mut override_major = Vec::new();
628 let mut override_major = Vec::new();
620
629
621 let mut to_major = |k: &PathToken, v: &TimeStampedPathCopy| {
630 let mut to_major = |k: &PathToken, v: &TimeStampedPathCopy| {
622 override_major.push((k.clone(), v.clone()))
631 override_major.push((k.clone(), v.clone()))
623 };
632 };
624 let mut to_minor = |k: &PathToken, v: &TimeStampedPathCopy| {
633 let mut to_minor = |k: &PathToken, v: &TimeStampedPathCopy| {
625 override_minor.push((k.clone(), v.clone()))
634 override_minor.push((k.clone(), v.clone()))
626 };
635 };
627
636
628 // The diff function leverage detection of the identical subpart if
637 // The diff function leverage detection of the identical subpart if
629 // minor and major has some common ancestors. This make it very
638 // minor and major has some common ancestors. This make it very
630 // fast is most case.
639 // fast is most case.
631 //
640 //
632 // In case where the two map are vastly different in size, the current
641 // In case where the two map are vastly different in size, the current
633 // approach is still slowish because the iteration will iterate over
642 // approach is still slowish because the iteration will iterate over
634 // all the "exclusive" content of the larger on. This situation can be
643 // all the "exclusive" content of the larger on. This situation can be
635 // frequent when the subgraph of revision we are processing has a lot
644 // frequent when the subgraph of revision we are processing has a lot
636 // of roots. Each roots adding they own fully new map to the mix (and
645 // of roots. Each roots adding they own fully new map to the mix (and
637 // likely a small map, if the path from the root to the "main path" is
646 // likely a small map, if the path from the root to the "main path" is
638 // small.
647 // small.
639 //
648 //
640 // We could do better by detecting such situation and processing them
649 // We could do better by detecting such situation and processing them
641 // differently.
650 // differently.
642 for d in minor.diff(&major) {
651 for d in minor.diff(&major) {
643 match d {
652 match d {
644 DiffItem::Add(k, v) => to_minor(k, v),
653 DiffItem::Add(k, v) => to_minor(k, v),
645 DiffItem::Remove(k, v) => to_major(k, v),
654 DiffItem::Remove(k, v) => to_major(k, v),
646 DiffItem::Update { old, new } => {
655 DiffItem::Update { old, new } => {
647 let (dest, src_major) = new;
656 let (dest, src_major) = new;
648 let (_, src_minor) = old;
657 let (_, src_minor) = old;
649 match cmp_value(dest, src_minor, src_major) {
658 match cmp_value(dest, src_minor, src_major) {
650 MergePick::Major => to_minor(dest, src_major),
659 MergePick::Major => to_minor(dest, src_major),
651 MergePick::Minor => to_major(dest, src_minor),
660 MergePick::Minor => to_major(dest, src_minor),
652 // If the two entry are identical, no need to do
661 // If the two entry are identical, no need to do
653 // anything (but diff should not have yield them)
662 // anything (but diff should not have yield them)
654 MergePick::Any => unreachable!(),
663 MergePick::Any => unreachable!(),
655 }
664 }
656 }
665 }
657 };
666 };
658 }
667 }
659
668
660 let updates;
669 let updates;
661 let mut result;
670 let mut result;
662 if override_major.is_empty() {
671 if override_major.is_empty() {
663 result = major
672 result = major
664 } else if override_minor.is_empty() {
673 } else if override_minor.is_empty() {
665 result = minor
674 result = minor
666 } else {
675 } else {
667 if override_minor.len() < override_major.len() {
676 if override_minor.len() < override_major.len() {
668 updates = override_minor;
677 updates = override_minor;
669 result = minor;
678 result = minor;
670 } else {
679 } else {
671 updates = override_major;
680 updates = override_major;
672 result = major;
681 result = major;
673 }
682 }
674 for (k, v) in updates {
683 for (k, v) in updates {
675 result.insert(k, v);
684 result.insert(k, v);
676 }
685 }
677 }
686 }
678 result
687 result
679 }
688 }
680 }
689 }
681
690
/// Outcome of comparing two conflicting copy-source entries while merging
/// a pair of `TimeStampedPathCopies` maps.
enum MergePick {
    /// keep the value coming from the "major" side (p1)
    Major,
    /// keep the value coming from the "minor" side (p2)
    Minor,
    /// the two values are interchangeable, either side can be kept
    Any,
}
692
701
693 /// decide which side prevails in case of conflicting values
702 /// decide which side prevails in case of conflicting values
694 #[allow(clippy::if_same_then_else)]
703 #[allow(clippy::if_same_then_else)]
695 fn compare_value<A: Fn(Revision, Revision) -> bool>(
704 fn compare_value<A: Fn(Revision, Revision) -> bool>(
696 path_map: &TwoWayPathMap,
705 path_map: &TwoWayPathMap,
697 changes: &ChangedFiles,
706 changes: &ChangedFiles,
698 oracle: &mut AncestorOracle<A>,
707 oracle: &mut AncestorOracle<A>,
699 dest: &PathToken,
708 dest: &PathToken,
700 src_minor: &TimeStampedPathCopy,
709 src_minor: &TimeStampedPathCopy,
701 src_major: &TimeStampedPathCopy,
710 src_major: &TimeStampedPathCopy,
702 ) -> MergePick {
711 ) -> MergePick {
703 if src_major.path == src_minor.path {
712 if src_major.path == src_minor.path {
704 // we have the same value, but from other source;
713 // we have the same value, but from other source;
705 if src_major.rev == src_minor.rev {
714 if src_major.rev == src_minor.rev {
706 // If the two entry are identical, they are both valid
715 // If the two entry are identical, they are both valid
707 MergePick::Any
716 MergePick::Any
708 } else if oracle.is_overwrite(src_major.rev, src_minor.rev) {
717 } else if oracle.is_overwrite(src_major.rev, src_minor.rev) {
709 MergePick::Minor
718 MergePick::Minor
710 } else {
719 } else {
711 MergePick::Major
720 MergePick::Major
712 }
721 }
713 } else if src_major.rev == src_minor.rev {
722 } else if src_major.rev == src_minor.rev {
714 // We cannot get copy information for both p1 and p2 in the
723 // We cannot get copy information for both p1 and p2 in the
715 // same rev. So this is the same value.
724 // same rev. So this is the same value.
716 unreachable!(
725 unreachable!(
717 "conflict information from p1 and p2 in the same revision"
726 "conflict information from p1 and p2 in the same revision"
718 );
727 );
719 } else {
728 } else {
720 let dest_path = path_map.untokenize(*dest);
729 let dest_path = path_map.untokenize(*dest);
721 let action = changes.get_merge_case(dest_path);
730 let action = changes.get_merge_case(dest_path);
722 if src_major.path.is_none() && action == MergeCase::Salvaged {
731 if src_major.path.is_none() && action == MergeCase::Salvaged {
723 // If the file is "deleted" in the major side but was
732 // If the file is "deleted" in the major side but was
724 // salvaged by the merge, we keep the minor side alive
733 // salvaged by the merge, we keep the minor side alive
725 MergePick::Minor
734 MergePick::Minor
726 } else if src_minor.path.is_none() && action == MergeCase::Salvaged {
735 } else if src_minor.path.is_none() && action == MergeCase::Salvaged {
727 // If the file is "deleted" in the minor side but was
736 // If the file is "deleted" in the minor side but was
728 // salvaged by the merge, unconditionnaly preserve the
737 // salvaged by the merge, unconditionnaly preserve the
729 // major side.
738 // major side.
730 MergePick::Major
739 MergePick::Major
731 } else if action == MergeCase::Merged {
740 } else if action == MergeCase::Merged {
732 // If the file was actively merged, copy information
741 // If the file was actively merged, copy information
733 // from each side might conflict. The major side will
742 // from each side might conflict. The major side will
734 // win such conflict.
743 // win such conflict.
735 MergePick::Major
744 MergePick::Major
736 } else if oracle.is_overwrite(src_major.rev, src_minor.rev) {
745 } else if oracle.is_overwrite(src_major.rev, src_minor.rev) {
737 // If the minor side is strictly newer than the major
746 // If the minor side is strictly newer than the major
738 // side, it should be kept.
747 // side, it should be kept.
739 MergePick::Minor
748 MergePick::Minor
740 } else if src_major.path.is_some() {
749 } else if src_major.path.is_some() {
741 // without any special case, the "major" value win
750 // without any special case, the "major" value win
742 // other the "minor" one.
751 // other the "minor" one.
743 MergePick::Major
752 MergePick::Major
744 } else if oracle.is_overwrite(src_minor.rev, src_major.rev) {
753 } else if oracle.is_overwrite(src_minor.rev, src_major.rev) {
745 // the "major" rev is a direct ancestors of "minor",
754 // the "major" rev is a direct ancestors of "minor",
746 // any different value should
755 // any different value should
747 // overwrite
756 // overwrite
748 MergePick::Major
757 MergePick::Major
749 } else {
758 } else {
750 // major version is None (so the file was deleted on
759 // major version is None (so the file was deleted on
751 // that branch) and that branch is independant (neither
760 // that branch) and that branch is independant (neither
752 // minor nor major is an ancestors of the other one.)
761 // minor nor major is an ancestors of the other one.)
753 // We preserve the new
762 // We preserve the new
754 // information about the new file.
763 // information about the new file.
755 MergePick::Minor
764 MergePick::Minor
756 }
765 }
757 }
766 }
758 }
767 }
General Comments 0
You need to be logged in to leave comments. Login now