// copies-rust: use the entry API to overwrite deleted entry
// Changeset r46767:e166e8a0 (default branch) — author: marmoute
// Diff hunk: @@ -1,751 +1,748 @@
1 use crate::utils::hg_path::HgPath;
1 use crate::utils::hg_path::HgPath;
2 use crate::utils::hg_path::HgPathBuf;
2 use crate::utils::hg_path::HgPathBuf;
3 use crate::Revision;
3 use crate::Revision;
4 use crate::NULL_REVISION;
4 use crate::NULL_REVISION;
5
5
6 use im_rc::ordmap::DiffItem;
6 use im_rc::ordmap::DiffItem;
7 use im_rc::ordmap::OrdMap;
7 use im_rc::ordmap::OrdMap;
8
8
9 use std::cmp::Ordering;
9 use std::cmp::Ordering;
10 use std::collections::HashMap;
10 use std::collections::HashMap;
11 use std::convert::TryInto;
11 use std::convert::TryInto;
12
12
13 pub type PathCopies = HashMap<HgPathBuf, HgPathBuf>;
13 pub type PathCopies = HashMap<HgPathBuf, HgPathBuf>;
14
14
15 type PathToken = usize;
15 type PathToken = usize;
16
16
17 #[derive(Clone, Debug, PartialEq, Copy)]
17 #[derive(Clone, Debug, PartialEq, Copy)]
18 struct TimeStampedPathCopy {
18 struct TimeStampedPathCopy {
19 /// revision at which the copy information was added
19 /// revision at which the copy information was added
20 rev: Revision,
20 rev: Revision,
21 /// the copy source, (Set to None in case of deletion of the associated
21 /// the copy source, (Set to None in case of deletion of the associated
22 /// key)
22 /// key)
23 path: Option<PathToken>,
23 path: Option<PathToken>,
24 }
24 }
25
25
26 /// maps CopyDestination to Copy Source (+ a "timestamp" for the operation)
26 /// maps CopyDestination to Copy Source (+ a "timestamp" for the operation)
27 type TimeStampedPathCopies = OrdMap<PathToken, TimeStampedPathCopy>;
27 type TimeStampedPathCopies = OrdMap<PathToken, TimeStampedPathCopy>;
28
28
29 /// hold parent 1, parent 2 and relevant files actions.
29 /// hold parent 1, parent 2 and relevant files actions.
30 pub type RevInfo<'a> = (Revision, Revision, ChangedFiles<'a>);
30 pub type RevInfo<'a> = (Revision, Revision, ChangedFiles<'a>);
31
31
/// represent the files affected by a changeset
///
/// This holds a subset of `mercurial.metadata.ChangingFiles` as we do not
/// need all the data categories tracked by it.
pub struct ChangedFiles<'a> {
    /// number of entries recorded in `index`
    nb_items: u32,
    /// fixed-size index entries (flags byte + offsets into `data`)
    index: &'a [u8],
    /// concatenated filename bytes that `index` points into
    data: &'a [u8],
}
43
43
44 /// Represent active changes that affect the copy tracing.
44 /// Represent active changes that affect the copy tracing.
45 enum Action<'a> {
45 enum Action<'a> {
46 /// The parent ? children edge is removing a file
46 /// The parent ? children edge is removing a file
47 ///
47 ///
48 /// (actually, this could be the edge from the other parent, but it does
48 /// (actually, this could be the edge from the other parent, but it does
49 /// not matters)
49 /// not matters)
50 Removed(&'a HgPath),
50 Removed(&'a HgPath),
51 /// The parent ? children edge introduce copy information between (dest,
51 /// The parent ? children edge introduce copy information between (dest,
52 /// source)
52 /// source)
53 Copied(&'a HgPath, &'a HgPath),
53 Copied(&'a HgPath, &'a HgPath),
54 }
54 }
55
55
/// This expresses the possible "special" cases we can get in a merge
///
/// See mercurial/metadata.py for details on these values.
#[derive(PartialEq)]
enum MergeCase {
    /// Merged: file had history on both sides that needed to be merged
    Merged,
    /// Salvaged: file was candidate for deletion, but survived the merge
    Salvaged,
    /// Normal: Not one of the two cases above
    Normal,
}
68
68
69 type FileChange<'a> = (u8, &'a HgPath, &'a HgPath);
69 type FileChange<'a> = (u8, &'a HgPath, &'a HgPath);
70
70
71 const EMPTY: &[u8] = b"";
71 const EMPTY: &[u8] = b"";
72 const COPY_MASK: u8 = 3;
72 const COPY_MASK: u8 = 3;
73 const P1_COPY: u8 = 2;
73 const P1_COPY: u8 = 2;
74 const P2_COPY: u8 = 3;
74 const P2_COPY: u8 = 3;
75 const ACTION_MASK: u8 = 28;
75 const ACTION_MASK: u8 = 28;
76 const REMOVED: u8 = 12;
76 const REMOVED: u8 = 12;
77 const MERGED: u8 = 8;
77 const MERGED: u8 = 8;
78 const SALVAGED: u8 = 16;
78 const SALVAGED: u8 = 16;
79
79
80 impl<'a> ChangedFiles<'a> {
80 impl<'a> ChangedFiles<'a> {
81 const INDEX_START: usize = 4;
81 const INDEX_START: usize = 4;
82 const ENTRY_SIZE: u32 = 9;
82 const ENTRY_SIZE: u32 = 9;
83 const FILENAME_START: u32 = 1;
83 const FILENAME_START: u32 = 1;
84 const COPY_SOURCE_START: u32 = 5;
84 const COPY_SOURCE_START: u32 = 5;
85
85
86 pub fn new(data: &'a [u8]) -> Self {
86 pub fn new(data: &'a [u8]) -> Self {
87 assert!(
87 assert!(
88 data.len() >= 4,
88 data.len() >= 4,
89 "data size ({}) is too small to contain the header (4)",
89 "data size ({}) is too small to contain the header (4)",
90 data.len()
90 data.len()
91 );
91 );
92 let nb_items_raw: [u8; 4] = (&data[0..=3])
92 let nb_items_raw: [u8; 4] = (&data[0..=3])
93 .try_into()
93 .try_into()
94 .expect("failed to turn 4 bytes into 4 bytes");
94 .expect("failed to turn 4 bytes into 4 bytes");
95 let nb_items = u32::from_be_bytes(nb_items_raw);
95 let nb_items = u32::from_be_bytes(nb_items_raw);
96
96
97 let index_size = (nb_items * Self::ENTRY_SIZE) as usize;
97 let index_size = (nb_items * Self::ENTRY_SIZE) as usize;
98 let index_end = Self::INDEX_START + index_size;
98 let index_end = Self::INDEX_START + index_size;
99
99
100 assert!(
100 assert!(
101 data.len() >= index_end,
101 data.len() >= index_end,
102 "data size ({}) is too small to fit the index_data ({})",
102 "data size ({}) is too small to fit the index_data ({})",
103 data.len(),
103 data.len(),
104 index_end
104 index_end
105 );
105 );
106
106
107 let ret = ChangedFiles {
107 let ret = ChangedFiles {
108 nb_items,
108 nb_items,
109 index: &data[Self::INDEX_START..index_end],
109 index: &data[Self::INDEX_START..index_end],
110 data: &data[index_end..],
110 data: &data[index_end..],
111 };
111 };
112 let max_data = ret.filename_end(nb_items - 1) as usize;
112 let max_data = ret.filename_end(nb_items - 1) as usize;
113 assert!(
113 assert!(
114 ret.data.len() >= max_data,
114 ret.data.len() >= max_data,
115 "data size ({}) is too small to fit all data ({})",
115 "data size ({}) is too small to fit all data ({})",
116 data.len(),
116 data.len(),
117 index_end + max_data
117 index_end + max_data
118 );
118 );
119 ret
119 ret
120 }
120 }
121
121
122 pub fn new_empty() -> Self {
122 pub fn new_empty() -> Self {
123 ChangedFiles {
123 ChangedFiles {
124 nb_items: 0,
124 nb_items: 0,
125 index: EMPTY,
125 index: EMPTY,
126 data: EMPTY,
126 data: EMPTY,
127 }
127 }
128 }
128 }
129
129
130 /// internal function to return an individual entry at a given index
130 /// internal function to return an individual entry at a given index
131 fn entry(&'a self, idx: u32) -> FileChange<'a> {
131 fn entry(&'a self, idx: u32) -> FileChange<'a> {
132 if idx >= self.nb_items {
132 if idx >= self.nb_items {
133 panic!(
133 panic!(
134 "index for entry is higher that the number of file {} >= {}",
134 "index for entry is higher that the number of file {} >= {}",
135 idx, self.nb_items
135 idx, self.nb_items
136 )
136 )
137 }
137 }
138 let flags = self.flags(idx);
138 let flags = self.flags(idx);
139 let filename = self.filename(idx);
139 let filename = self.filename(idx);
140 let copy_idx = self.copy_idx(idx);
140 let copy_idx = self.copy_idx(idx);
141 let copy_source = self.filename(copy_idx);
141 let copy_source = self.filename(copy_idx);
142 (flags, filename, copy_source)
142 (flags, filename, copy_source)
143 }
143 }
144
144
145 /// internal function to return the filename of the entry at a given index
145 /// internal function to return the filename of the entry at a given index
146 fn filename(&self, idx: u32) -> &HgPath {
146 fn filename(&self, idx: u32) -> &HgPath {
147 let filename_start;
147 let filename_start;
148 if idx == 0 {
148 if idx == 0 {
149 filename_start = 0;
149 filename_start = 0;
150 } else {
150 } else {
151 filename_start = self.filename_end(idx - 1)
151 filename_start = self.filename_end(idx - 1)
152 }
152 }
153 let filename_end = self.filename_end(idx);
153 let filename_end = self.filename_end(idx);
154 let filename_start = filename_start as usize;
154 let filename_start = filename_start as usize;
155 let filename_end = filename_end as usize;
155 let filename_end = filename_end as usize;
156 HgPath::new(&self.data[filename_start..filename_end])
156 HgPath::new(&self.data[filename_start..filename_end])
157 }
157 }
158
158
159 /// internal function to return the flag field of the entry at a given
159 /// internal function to return the flag field of the entry at a given
160 /// index
160 /// index
161 fn flags(&self, idx: u32) -> u8 {
161 fn flags(&self, idx: u32) -> u8 {
162 let idx = idx as usize;
162 let idx = idx as usize;
163 self.index[idx * (Self::ENTRY_SIZE as usize)]
163 self.index[idx * (Self::ENTRY_SIZE as usize)]
164 }
164 }
165
165
166 /// internal function to return the end of a filename part at a given index
166 /// internal function to return the end of a filename part at a given index
167 fn filename_end(&self, idx: u32) -> u32 {
167 fn filename_end(&self, idx: u32) -> u32 {
168 let start = (idx * Self::ENTRY_SIZE) + Self::FILENAME_START;
168 let start = (idx * Self::ENTRY_SIZE) + Self::FILENAME_START;
169 let end = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
169 let end = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
170 let start = start as usize;
170 let start = start as usize;
171 let end = end as usize;
171 let end = end as usize;
172 let raw = (&self.index[start..end])
172 let raw = (&self.index[start..end])
173 .try_into()
173 .try_into()
174 .expect("failed to turn 4 bytes into 4 bytes");
174 .expect("failed to turn 4 bytes into 4 bytes");
175 u32::from_be_bytes(raw)
175 u32::from_be_bytes(raw)
176 }
176 }
177
177
178 /// internal function to return index of the copy source of the entry at a
178 /// internal function to return index of the copy source of the entry at a
179 /// given index
179 /// given index
180 fn copy_idx(&self, idx: u32) -> u32 {
180 fn copy_idx(&self, idx: u32) -> u32 {
181 let start = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
181 let start = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
182 let end = (idx + 1) * Self::ENTRY_SIZE;
182 let end = (idx + 1) * Self::ENTRY_SIZE;
183 let start = start as usize;
183 let start = start as usize;
184 let end = end as usize;
184 let end = end as usize;
185 let raw = (&self.index[start..end])
185 let raw = (&self.index[start..end])
186 .try_into()
186 .try_into()
187 .expect("failed to turn 4 bytes into 4 bytes");
187 .expect("failed to turn 4 bytes into 4 bytes");
188 u32::from_be_bytes(raw)
188 u32::from_be_bytes(raw)
189 }
189 }
190
190
191 /// Return an iterator over all the `Action` in this instance.
191 /// Return an iterator over all the `Action` in this instance.
192 fn iter_actions(&self, parent: Parent) -> ActionsIterator {
192 fn iter_actions(&self, parent: Parent) -> ActionsIterator {
193 ActionsIterator {
193 ActionsIterator {
194 changes: &self,
194 changes: &self,
195 parent: parent,
195 parent: parent,
196 current: 0,
196 current: 0,
197 }
197 }
198 }
198 }
199
199
200 /// return the MergeCase value associated with a filename
200 /// return the MergeCase value associated with a filename
201 fn get_merge_case(&self, path: &HgPath) -> MergeCase {
201 fn get_merge_case(&self, path: &HgPath) -> MergeCase {
202 if self.nb_items == 0 {
202 if self.nb_items == 0 {
203 return MergeCase::Normal;
203 return MergeCase::Normal;
204 }
204 }
205 let mut low_part = 0;
205 let mut low_part = 0;
206 let mut high_part = self.nb_items;
206 let mut high_part = self.nb_items;
207
207
208 while low_part < high_part {
208 while low_part < high_part {
209 let cursor = (low_part + high_part - 1) / 2;
209 let cursor = (low_part + high_part - 1) / 2;
210 let (flags, filename, _source) = self.entry(cursor);
210 let (flags, filename, _source) = self.entry(cursor);
211 match path.cmp(filename) {
211 match path.cmp(filename) {
212 Ordering::Less => low_part = cursor + 1,
212 Ordering::Less => low_part = cursor + 1,
213 Ordering::Greater => high_part = cursor,
213 Ordering::Greater => high_part = cursor,
214 Ordering::Equal => {
214 Ordering::Equal => {
215 return match flags & ACTION_MASK {
215 return match flags & ACTION_MASK {
216 MERGED => MergeCase::Merged,
216 MERGED => MergeCase::Merged,
217 SALVAGED => MergeCase::Salvaged,
217 SALVAGED => MergeCase::Salvaged,
218 _ => MergeCase::Normal,
218 _ => MergeCase::Normal,
219 };
219 };
220 }
220 }
221 }
221 }
222 }
222 }
223 MergeCase::Normal
223 MergeCase::Normal
224 }
224 }
225 }
225 }
226
226
227 /// A struct responsible for answering "is X ancestors of Y" quickly
227 /// A struct responsible for answering "is X ancestors of Y" quickly
228 ///
228 ///
229 /// The structure will delegate ancestors call to a callback, and cache the
229 /// The structure will delegate ancestors call to a callback, and cache the
230 /// result.
230 /// result.
231 #[derive(Debug)]
231 #[derive(Debug)]
232 struct AncestorOracle<'a, A: Fn(Revision, Revision) -> bool> {
232 struct AncestorOracle<'a, A: Fn(Revision, Revision) -> bool> {
233 inner: &'a A,
233 inner: &'a A,
234 pairs: HashMap<(Revision, Revision), bool>,
234 pairs: HashMap<(Revision, Revision), bool>,
235 }
235 }
236
236
237 impl<'a, A: Fn(Revision, Revision) -> bool> AncestorOracle<'a, A> {
237 impl<'a, A: Fn(Revision, Revision) -> bool> AncestorOracle<'a, A> {
238 fn new(func: &'a A) -> Self {
238 fn new(func: &'a A) -> Self {
239 Self {
239 Self {
240 inner: func,
240 inner: func,
241 pairs: HashMap::default(),
241 pairs: HashMap::default(),
242 }
242 }
243 }
243 }
244
244
245 /// returns `true` if `anc` is an ancestors of `desc`, `false` otherwise
245 /// returns `true` if `anc` is an ancestors of `desc`, `false` otherwise
246 fn is_ancestor(&mut self, anc: Revision, desc: Revision) -> bool {
246 fn is_ancestor(&mut self, anc: Revision, desc: Revision) -> bool {
247 if anc > desc {
247 if anc > desc {
248 false
248 false
249 } else if anc == desc {
249 } else if anc == desc {
250 true
250 true
251 } else {
251 } else {
252 if let Some(b) = self.pairs.get(&(anc, desc)) {
252 if let Some(b) = self.pairs.get(&(anc, desc)) {
253 *b
253 *b
254 } else {
254 } else {
255 let b = (self.inner)(anc, desc);
255 let b = (self.inner)(anc, desc);
256 self.pairs.insert((anc, desc), b);
256 self.pairs.insert((anc, desc), b);
257 b
257 b
258 }
258 }
259 }
259 }
260 }
260 }
261 }
261 }
262
262
263 struct ActionsIterator<'a> {
263 struct ActionsIterator<'a> {
264 changes: &'a ChangedFiles<'a>,
264 changes: &'a ChangedFiles<'a>,
265 parent: Parent,
265 parent: Parent,
266 current: u32,
266 current: u32,
267 }
267 }
268
268
269 impl<'a> Iterator for ActionsIterator<'a> {
269 impl<'a> Iterator for ActionsIterator<'a> {
270 type Item = Action<'a>;
270 type Item = Action<'a>;
271
271
272 fn next(&mut self) -> Option<Action<'a>> {
272 fn next(&mut self) -> Option<Action<'a>> {
273 let copy_flag = match self.parent {
273 let copy_flag = match self.parent {
274 Parent::FirstParent => P1_COPY,
274 Parent::FirstParent => P1_COPY,
275 Parent::SecondParent => P2_COPY,
275 Parent::SecondParent => P2_COPY,
276 };
276 };
277 while self.current < self.changes.nb_items {
277 while self.current < self.changes.nb_items {
278 let (flags, file, source) = self.changes.entry(self.current);
278 let (flags, file, source) = self.changes.entry(self.current);
279 self.current += 1;
279 self.current += 1;
280 if (flags & ACTION_MASK) == REMOVED {
280 if (flags & ACTION_MASK) == REMOVED {
281 return Some(Action::Removed(file));
281 return Some(Action::Removed(file));
282 }
282 }
283 let copy = flags & COPY_MASK;
283 let copy = flags & COPY_MASK;
284 if copy == copy_flag {
284 if copy == copy_flag {
285 return Some(Action::Copied(file, source));
285 return Some(Action::Copied(file, source));
286 }
286 }
287 }
287 }
288 return None;
288 return None;
289 }
289 }
290 }
290 }
291
291
/// A small struct whose purpose is to ensure lifetime of bytes referenced in
/// ChangedFiles
///
/// It is passed to the RevInfoMaker callback who can assign any necessary
/// content to the `data` attribute. The copy tracing code is responsible for
/// keeping the DataHolder alive at least as long as the ChangedFiles object.
pub struct DataHolder<D> {
    /// RevInfoMaker callback should assign data referenced by the
    /// ChangedFiles struct it return to this attribute. The DataHolder
    /// lifetime will be at least as long as the ChangedFiles one.
    pub data: Option<D>,
}
304
304
305 pub type RevInfoMaker<'a, D> =
305 pub type RevInfoMaker<'a, D> =
306 Box<dyn for<'r> Fn(Revision, &'r mut DataHolder<D>) -> RevInfo<'r> + 'a>;
306 Box<dyn for<'r> Fn(Revision, &'r mut DataHolder<D>) -> RevInfo<'r> + 'a>;
307
307
/// enum used to carry information about the parent → child edge currently
/// processed
#[derive(Copy, Clone, Debug)]
enum Parent {
    /// The `p1(x) → x` edge
    FirstParent,
    /// The `p2(x) → x` edge
    SecondParent,
}
316
316
317 /// A small "tokenizer" responsible of turning full HgPath into lighter
317 /// A small "tokenizer" responsible of turning full HgPath into lighter
318 /// PathToken
318 /// PathToken
319 ///
319 ///
320 /// Dealing with small object, like integer is much faster, so HgPath input are
320 /// Dealing with small object, like integer is much faster, so HgPath input are
321 /// turned into integer "PathToken" and converted back in the end.
321 /// turned into integer "PathToken" and converted back in the end.
322 #[derive(Clone, Debug, Default)]
322 #[derive(Clone, Debug, Default)]
323 struct TwoWayPathMap {
323 struct TwoWayPathMap {
324 token: HashMap<HgPathBuf, PathToken>,
324 token: HashMap<HgPathBuf, PathToken>,
325 path: Vec<HgPathBuf>,
325 path: Vec<HgPathBuf>,
326 }
326 }
327
327
328 impl TwoWayPathMap {
328 impl TwoWayPathMap {
329 fn tokenize(&mut self, path: &HgPath) -> PathToken {
329 fn tokenize(&mut self, path: &HgPath) -> PathToken {
330 match self.token.get(path) {
330 match self.token.get(path) {
331 Some(a) => *a,
331 Some(a) => *a,
332 None => {
332 None => {
333 let a = self.token.len();
333 let a = self.token.len();
334 let buf = path.to_owned();
334 let buf = path.to_owned();
335 self.path.push(buf.clone());
335 self.path.push(buf.clone());
336 self.token.insert(buf, a);
336 self.token.insert(buf, a);
337 a
337 a
338 }
338 }
339 }
339 }
340 }
340 }
341
341
342 fn untokenize(&self, token: PathToken) -> &HgPathBuf {
342 fn untokenize(&self, token: PathToken) -> &HgPathBuf {
343 assert!(token < self.path.len(), format!("Unknown token: {}", token));
343 assert!(token < self.path.len(), format!("Unknown token: {}", token));
344 &self.path[token]
344 &self.path[token]
345 }
345 }
346 }
346 }
347
347
348 /// Same as mercurial.copies._combine_changeset_copies, but in Rust.
348 /// Same as mercurial.copies._combine_changeset_copies, but in Rust.
349 ///
349 ///
350 /// Arguments are:
350 /// Arguments are:
351 ///
351 ///
352 /// revs: all revisions to be considered
352 /// revs: all revisions to be considered
353 /// children: a {parent ? [childrens]} mapping
353 /// children: a {parent ? [childrens]} mapping
354 /// target_rev: the final revision we are combining copies to
354 /// target_rev: the final revision we are combining copies to
355 /// rev_info(rev): callback to get revision information:
355 /// rev_info(rev): callback to get revision information:
356 /// * first parent
356 /// * first parent
357 /// * second parent
357 /// * second parent
358 /// * ChangedFiles
358 /// * ChangedFiles
359 /// isancestors(low_rev, high_rev): callback to check if a revision is an
359 /// isancestors(low_rev, high_rev): callback to check if a revision is an
360 /// ancestor of another
360 /// ancestor of another
361 pub fn combine_changeset_copies<A: Fn(Revision, Revision) -> bool, D>(
361 pub fn combine_changeset_copies<A: Fn(Revision, Revision) -> bool, D>(
362 revs: Vec<Revision>,
362 revs: Vec<Revision>,
363 mut children_count: HashMap<Revision, usize>,
363 mut children_count: HashMap<Revision, usize>,
364 target_rev: Revision,
364 target_rev: Revision,
365 rev_info: RevInfoMaker<D>,
365 rev_info: RevInfoMaker<D>,
366 is_ancestor: &A,
366 is_ancestor: &A,
367 ) -> PathCopies {
367 ) -> PathCopies {
368 let mut all_copies = HashMap::new();
368 let mut all_copies = HashMap::new();
369 let mut oracle = AncestorOracle::new(is_ancestor);
369 let mut oracle = AncestorOracle::new(is_ancestor);
370
370
371 let mut path_map = TwoWayPathMap::default();
371 let mut path_map = TwoWayPathMap::default();
372
372
373 for rev in revs {
373 for rev in revs {
374 let mut d: DataHolder<D> = DataHolder { data: None };
374 let mut d: DataHolder<D> = DataHolder { data: None };
375 let (p1, p2, changes) = rev_info(rev, &mut d);
375 let (p1, p2, changes) = rev_info(rev, &mut d);
376
376
377 // We will chain the copies information accumulated for the parent with
377 // We will chain the copies information accumulated for the parent with
378 // the individual copies information the curent revision. Creating a
378 // the individual copies information the curent revision. Creating a
379 // new TimeStampedPath for each `rev` β†’ `children` vertex.
379 // new TimeStampedPath for each `rev` β†’ `children` vertex.
380 let mut copies: Option<TimeStampedPathCopies> = None;
380 let mut copies: Option<TimeStampedPathCopies> = None;
381 if p1 != NULL_REVISION {
381 if p1 != NULL_REVISION {
382 // Retrieve data computed in a previous iteration
382 // Retrieve data computed in a previous iteration
383 let parent_copies = get_and_clean_parent_copies(
383 let parent_copies = get_and_clean_parent_copies(
384 &mut all_copies,
384 &mut all_copies,
385 &mut children_count,
385 &mut children_count,
386 p1,
386 p1,
387 );
387 );
388 if let Some(parent_copies) = parent_copies {
388 if let Some(parent_copies) = parent_copies {
389 // combine it with data for that revision
389 // combine it with data for that revision
390 let vertex_copies = add_from_changes(
390 let vertex_copies = add_from_changes(
391 &mut path_map,
391 &mut path_map,
392 &parent_copies,
392 &parent_copies,
393 &changes,
393 &changes,
394 Parent::FirstParent,
394 Parent::FirstParent,
395 rev,
395 rev,
396 );
396 );
397 // keep that data around for potential later combination
397 // keep that data around for potential later combination
398 copies = Some(vertex_copies);
398 copies = Some(vertex_copies);
399 }
399 }
400 }
400 }
401 if p2 != NULL_REVISION {
401 if p2 != NULL_REVISION {
402 // Retrieve data computed in a previous iteration
402 // Retrieve data computed in a previous iteration
403 let parent_copies = get_and_clean_parent_copies(
403 let parent_copies = get_and_clean_parent_copies(
404 &mut all_copies,
404 &mut all_copies,
405 &mut children_count,
405 &mut children_count,
406 p2,
406 p2,
407 );
407 );
408 if let Some(parent_copies) = parent_copies {
408 if let Some(parent_copies) = parent_copies {
409 // combine it with data for that revision
409 // combine it with data for that revision
410 let vertex_copies = add_from_changes(
410 let vertex_copies = add_from_changes(
411 &mut path_map,
411 &mut path_map,
412 &parent_copies,
412 &parent_copies,
413 &changes,
413 &changes,
414 Parent::SecondParent,
414 Parent::SecondParent,
415 rev,
415 rev,
416 );
416 );
417
417
418 copies = match copies {
418 copies = match copies {
419 None => Some(vertex_copies),
419 None => Some(vertex_copies),
420 // Merge has two parents needs to combines their copy
420 // Merge has two parents needs to combines their copy
421 // information.
421 // information.
422 //
422 //
423 // If we got data from both parents, We need to combine
423 // If we got data from both parents, We need to combine
424 // them.
424 // them.
425 Some(copies) => Some(merge_copies_dict(
425 Some(copies) => Some(merge_copies_dict(
426 &path_map,
426 &path_map,
427 vertex_copies,
427 vertex_copies,
428 copies,
428 copies,
429 &changes,
429 &changes,
430 &mut oracle,
430 &mut oracle,
431 )),
431 )),
432 };
432 };
433 }
433 }
434 }
434 }
435 match copies {
435 match copies {
436 Some(copies) => {
436 Some(copies) => {
437 all_copies.insert(rev, copies);
437 all_copies.insert(rev, copies);
438 }
438 }
439 _ => {}
439 _ => {}
440 }
440 }
441 }
441 }
442
442
443 // Drop internal information (like the timestamp) and return the final
443 // Drop internal information (like the timestamp) and return the final
444 // mapping.
444 // mapping.
445 let tt_result = all_copies
445 let tt_result = all_copies
446 .remove(&target_rev)
446 .remove(&target_rev)
447 .expect("target revision was not processed");
447 .expect("target revision was not processed");
448 let mut result = PathCopies::default();
448 let mut result = PathCopies::default();
449 for (dest, tt_source) in tt_result {
449 for (dest, tt_source) in tt_result {
450 if let Some(path) = tt_source.path {
450 if let Some(path) = tt_source.path {
451 let path_dest = path_map.untokenize(dest).to_owned();
451 let path_dest = path_map.untokenize(dest).to_owned();
452 let path_path = path_map.untokenize(path).to_owned();
452 let path_path = path_map.untokenize(path).to_owned();
453 result.insert(path_dest, path_path);
453 result.insert(path_dest, path_path);
454 }
454 }
455 }
455 }
456 result
456 result
457 }
457 }
458
458
459 /// fetch previous computed information
459 /// fetch previous computed information
460 ///
460 ///
461 /// If no other children are expected to need this information, we drop it from
461 /// If no other children are expected to need this information, we drop it from
462 /// the cache.
462 /// the cache.
463 ///
463 ///
464 /// If parent is not part of the set we are expected to walk, return None.
464 /// If parent is not part of the set we are expected to walk, return None.
465 fn get_and_clean_parent_copies(
465 fn get_and_clean_parent_copies(
466 all_copies: &mut HashMap<Revision, TimeStampedPathCopies>,
466 all_copies: &mut HashMap<Revision, TimeStampedPathCopies>,
467 children_count: &mut HashMap<Revision, usize>,
467 children_count: &mut HashMap<Revision, usize>,
468 parent_rev: Revision,
468 parent_rev: Revision,
469 ) -> Option<TimeStampedPathCopies> {
469 ) -> Option<TimeStampedPathCopies> {
470 let count = children_count.get_mut(&parent_rev)?;
470 let count = children_count.get_mut(&parent_rev)?;
471 *count -= 1;
471 *count -= 1;
472 if *count == 0 {
472 if *count == 0 {
473 match all_copies.remove(&parent_rev) {
473 match all_copies.remove(&parent_rev) {
474 Some(c) => Some(c),
474 Some(c) => Some(c),
475 None => Some(TimeStampedPathCopies::default()),
475 None => Some(TimeStampedPathCopies::default()),
476 }
476 }
477 } else {
477 } else {
478 match all_copies.get(&parent_rev) {
478 match all_copies.get(&parent_rev) {
479 Some(c) => Some(c.clone()),
479 Some(c) => Some(c.clone()),
480 None => Some(TimeStampedPathCopies::default()),
480 None => Some(TimeStampedPathCopies::default()),
481 }
481 }
482 }
482 }
483 }
483 }
484
484
485 /// Combine ChangedFiles with some existing PathCopies information and return
485 /// Combine ChangedFiles with some existing PathCopies information and return
486 /// the result
486 /// the result
487 fn add_from_changes(
487 fn add_from_changes(
488 path_map: &mut TwoWayPathMap,
488 path_map: &mut TwoWayPathMap,
489 base_copies: &TimeStampedPathCopies,
489 base_copies: &TimeStampedPathCopies,
490 changes: &ChangedFiles,
490 changes: &ChangedFiles,
491 parent: Parent,
491 parent: Parent,
492 current_rev: Revision,
492 current_rev: Revision,
493 ) -> TimeStampedPathCopies {
493 ) -> TimeStampedPathCopies {
494 let mut copies = base_copies.clone();
494 let mut copies = base_copies.clone();
495 for action in changes.iter_actions(parent) {
495 for action in changes.iter_actions(parent) {
496 match action {
496 match action {
497 Action::Copied(path_dest, path_source) => {
497 Action::Copied(path_dest, path_source) => {
498 let dest = path_map.tokenize(path_dest);
498 let dest = path_map.tokenize(path_dest);
499 let source = path_map.tokenize(path_source);
499 let source = path_map.tokenize(path_source);
500 let entry;
500 let entry;
501 if let Some(v) = base_copies.get(&source) {
501 if let Some(v) = base_copies.get(&source) {
502 entry = match &v.path {
502 entry = match &v.path {
503 Some(path) => Some((*(path)).to_owned()),
503 Some(path) => Some((*(path)).to_owned()),
504 None => Some(source.to_owned()),
504 None => Some(source.to_owned()),
505 }
505 }
506 } else {
506 } else {
507 entry = Some(source.to_owned());
507 entry = Some(source.to_owned());
508 }
508 }
509 // Each new entry is introduced by the children, we
509 // Each new entry is introduced by the children, we
510 // record this information as we will need it to take
510 // record this information as we will need it to take
511 // the right decision when merging conflicting copy
511 // the right decision when merging conflicting copy
512 // information. See merge_copies_dict for details.
512 // information. See merge_copies_dict for details.
513 let ttpc = TimeStampedPathCopy {
513 let ttpc = TimeStampedPathCopy {
514 rev: current_rev,
514 rev: current_rev,
515 path: entry,
515 path: entry,
516 };
516 };
517 copies.insert(dest.to_owned(), ttpc);
517 copies.insert(dest.to_owned(), ttpc);
518 }
518 }
519 Action::Removed(deleted_path) => {
519 Action::Removed(deleted_path) => {
520 // We must drop copy information for removed file.
520 // We must drop copy information for removed file.
521 //
521 //
522 // We need to explicitly record them as dropped to
522 // We need to explicitly record them as dropped to
523 // propagate this information when merging two
523 // propagate this information when merging two
524 // TimeStampedPathCopies object.
524 // TimeStampedPathCopies object.
525 let deleted = path_map.tokenize(deleted_path);
525 let deleted = path_map.tokenize(deleted_path);
526 if copies.contains_key(&deleted) {
526 copies.entry(deleted).and_modify(|old| {
527 let ttpc = TimeStampedPathCopy {
527 old.rev = current_rev;
528 rev: current_rev,
528 old.path = None;
529 path: None,
529 });
530 };
531 copies.insert(deleted, ttpc);
532 }
533 }
530 }
534 }
531 }
535 }
532 }
536 copies
533 copies
537 }
534 }
538
535
/// merge two copies-mapping together, minor and major
///
/// In case of conflict, value from "major" will be picked, unless in some
/// cases. See inline documentation for details.
///
/// Takes ownership of both maps and returns one of them (possibly mutated):
/// the function picks whichever strategy touches the fewest entries, so the
/// returned map shares as much structure as possible with the larger input.
///
/// * `path_map` - token <-> path translation table (used for merge-case
///   lookups in `compare_value`)
/// * `minor` / `major` - the two copies mappings to merge
/// * `changes` - file actions of the merge revision, consulted on conflicts
/// * `oracle` - ancestry oracle used to order conflicting timestamps
fn merge_copies_dict<A: Fn(Revision, Revision) -> bool>(
    path_map: &TwoWayPathMap,
    mut minor: TimeStampedPathCopies,
    mut major: TimeStampedPathCopies,
    changes: &ChangedFiles,
    oracle: &mut AncestorOracle<A>,
) -> TimeStampedPathCopies {
    // This closure exist as temporary help while multiple developper are
    // actively working on this code. Feel free to re-inline it once this
    // code is more settled.
    let mut cmp_value =
        |dest: &PathToken,
         src_minor: &TimeStampedPathCopy,
         src_major: &TimeStampedPathCopy| {
            compare_value(
                path_map, changes, oracle, dest, src_minor, src_major,
            )
        };
    if minor.is_empty() {
        major
    } else if major.is_empty() {
        minor
    } else if minor.len() * 2 < major.len() {
        // Lets says we are merging two TimeStampedPathCopies instance A and B.
        //
        // If A contains N items, the merge result will never contains more
        // than N values differents than the one in A
        //
        // If B contains M items, with M > N, the merge result will always
        // result in a minimum of M - N value differents than the on in
        // A
        //
        // As a result, if N < (M-N), we know that simply iterating over A will
        // yield less difference than iterating over the difference
        // between A and B.
        //
        // This help performance a lot in case were a tiny
        // TimeStampedPathCopies is merged with a much larger one.
        for (dest, src_minor) in minor {
            let src_major = major.get(&dest);
            match src_major {
                // Entry only exists on the minor side: carry it over.
                None => major.insert(dest, src_minor),
                Some(src_major) => {
                    // All arms evaluate to an `Option` (the return value of
                    // `insert`, or a literal `None`); only the side effect of
                    // `insert` matters, the value is discarded by the `;`.
                    match cmp_value(&dest, &src_minor, src_major) {
                        MergePick::Any | MergePick::Major => None,
                        MergePick::Minor => major.insert(dest, src_minor),
                    }
                }
            };
        }
        major
    } else if major.len() * 2 < minor.len() {
        // This use the same rational than the previous block.
        // (Check previous block documentation for details.)
        for (dest, src_major) in major {
            let src_minor = minor.get(&dest);
            match src_minor {
                None => minor.insert(dest, src_major),
                Some(src_minor) => {
                    match cmp_value(&dest, src_minor, &src_major) {
                        MergePick::Any | MergePick::Minor => None,
                        MergePick::Major => minor.insert(dest, src_major),
                    }
                }
            };
        }
        minor
    } else {
        // Comparable sizes: diff the two maps and record, per side, the
        // entries that the *other* side must override.
        let mut override_minor = Vec::new();
        let mut override_major = Vec::new();

        let mut to_major = |k: &PathToken, v: &TimeStampedPathCopy| {
            override_major.push((k.clone(), v.clone()))
        };
        let mut to_minor = |k: &PathToken, v: &TimeStampedPathCopy| {
            override_minor.push((k.clone(), v.clone()))
        };

        // The diff function leverage detection of the identical subpart if
        // minor and major has some common ancestors. This make it very
        // fast is most case.
        //
        // In case where the two map are vastly different in size, the current
        // approach is still slowish because the iteration will iterate over
        // all the "exclusive" content of the larger on. This situation can be
        // frequent when the subgraph of revision we are processing has a lot
        // of roots. Each roots adding they own fully new map to the mix (and
        // likely a small map, if the path from the root to the "main path" is
        // small.
        //
        // We could do better by detecting such situation and processing them
        // differently.
        for d in minor.diff(&major) {
            match d {
                // `Add`: present in major only -> minor must gain it.
                DiffItem::Add(k, v) => to_minor(k, v),
                // `Remove`: present in minor only -> major must gain it.
                DiffItem::Remove(k, v) => to_major(k, v),
                DiffItem::Update { old, new } => {
                    let (dest, src_major) = new;
                    let (_, src_minor) = old;
                    match cmp_value(dest, src_minor, src_major) {
                        MergePick::Major => to_minor(dest, src_major),
                        MergePick::Minor => to_major(dest, src_minor),
                        // If the two entry are identical, no need to do
                        // anything (but diff should not have yield them)
                        MergePick::Any => unreachable!(),
                    }
                }
            };
        }

        // Apply the smaller override set onto the corresponding base map,
        // to minimize the number of insertions into the persistent map.
        let updates;
        let mut result;
        if override_major.is_empty() {
            result = major
        } else if override_minor.is_empty() {
            result = minor
        } else {
            if override_minor.len() < override_major.len() {
                updates = override_minor;
                result = minor;
            } else {
                updates = override_major;
                result = major;
            }
            for (k, v) in updates {
                result.insert(k, v);
            }
        }
        result
    }
}
674
671
/// Outcome of comparing two conflicting `TimeStampedPathCopy` entries:
/// which side's value should survive the merge.
enum MergePick {
    /// Both sides carry equivalent information; either one may be kept.
    Any,
    /// The value from the "major" side (p1) wins.
    Major,
    /// The value from the "minor" side (p2) wins.
    Minor,
}
685
682
686 /// decide which side prevails in case of conflicting values
683 /// decide which side prevails in case of conflicting values
687 #[allow(clippy::if_same_then_else)]
684 #[allow(clippy::if_same_then_else)]
688 fn compare_value<A: Fn(Revision, Revision) -> bool>(
685 fn compare_value<A: Fn(Revision, Revision) -> bool>(
689 path_map: &TwoWayPathMap,
686 path_map: &TwoWayPathMap,
690 changes: &ChangedFiles,
687 changes: &ChangedFiles,
691 oracle: &mut AncestorOracle<A>,
688 oracle: &mut AncestorOracle<A>,
692 dest: &PathToken,
689 dest: &PathToken,
693 src_minor: &TimeStampedPathCopy,
690 src_minor: &TimeStampedPathCopy,
694 src_major: &TimeStampedPathCopy,
691 src_major: &TimeStampedPathCopy,
695 ) -> MergePick {
692 ) -> MergePick {
696 if src_major.path == src_minor.path {
693 if src_major.path == src_minor.path {
697 // we have the same value, but from other source;
694 // we have the same value, but from other source;
698 if src_major.rev == src_minor.rev {
695 if src_major.rev == src_minor.rev {
699 // If the two entry are identical, they are both valid
696 // If the two entry are identical, they are both valid
700 MergePick::Any
697 MergePick::Any
701 } else if oracle.is_ancestor(src_major.rev, src_minor.rev) {
698 } else if oracle.is_ancestor(src_major.rev, src_minor.rev) {
702 MergePick::Minor
699 MergePick::Minor
703 } else {
700 } else {
704 MergePick::Major
701 MergePick::Major
705 }
702 }
706 } else if src_major.rev == src_minor.rev {
703 } else if src_major.rev == src_minor.rev {
707 // We cannot get copy information for both p1 and p2 in the
704 // We cannot get copy information for both p1 and p2 in the
708 // same rev. So this is the same value.
705 // same rev. So this is the same value.
709 unreachable!(
706 unreachable!(
710 "conflict information from p1 and p2 in the same revision"
707 "conflict information from p1 and p2 in the same revision"
711 );
708 );
712 } else {
709 } else {
713 let dest_path = path_map.untokenize(*dest);
710 let dest_path = path_map.untokenize(*dest);
714 let action = changes.get_merge_case(dest_path);
711 let action = changes.get_merge_case(dest_path);
715 if src_major.path.is_none() && action == MergeCase::Salvaged {
712 if src_major.path.is_none() && action == MergeCase::Salvaged {
716 // If the file is "deleted" in the major side but was
713 // If the file is "deleted" in the major side but was
717 // salvaged by the merge, we keep the minor side alive
714 // salvaged by the merge, we keep the minor side alive
718 MergePick::Minor
715 MergePick::Minor
719 } else if src_minor.path.is_none() && action == MergeCase::Salvaged {
716 } else if src_minor.path.is_none() && action == MergeCase::Salvaged {
720 // If the file is "deleted" in the minor side but was
717 // If the file is "deleted" in the minor side but was
721 // salvaged by the merge, unconditionnaly preserve the
718 // salvaged by the merge, unconditionnaly preserve the
722 // major side.
719 // major side.
723 MergePick::Major
720 MergePick::Major
724 } else if action == MergeCase::Merged {
721 } else if action == MergeCase::Merged {
725 // If the file was actively merged, copy information
722 // If the file was actively merged, copy information
726 // from each side might conflict. The major side will
723 // from each side might conflict. The major side will
727 // win such conflict.
724 // win such conflict.
728 MergePick::Major
725 MergePick::Major
729 } else if oracle.is_ancestor(src_major.rev, src_minor.rev) {
726 } else if oracle.is_ancestor(src_major.rev, src_minor.rev) {
730 // If the minor side is strictly newer than the major
727 // If the minor side is strictly newer than the major
731 // side, it should be kept.
728 // side, it should be kept.
732 MergePick::Minor
729 MergePick::Minor
733 } else if src_major.path.is_some() {
730 } else if src_major.path.is_some() {
734 // without any special case, the "major" value win
731 // without any special case, the "major" value win
735 // other the "minor" one.
732 // other the "minor" one.
736 MergePick::Major
733 MergePick::Major
737 } else if oracle.is_ancestor(src_minor.rev, src_major.rev) {
734 } else if oracle.is_ancestor(src_minor.rev, src_major.rev) {
738 // the "major" rev is a direct ancestors of "minor",
735 // the "major" rev is a direct ancestors of "minor",
739 // any different value should
736 // any different value should
740 // overwrite
737 // overwrite
741 MergePick::Major
738 MergePick::Major
742 } else {
739 } else {
743 // major version is None (so the file was deleted on
740 // major version is None (so the file was deleted on
744 // that branch) and that branch is independant (neither
741 // that branch) and that branch is independant (neither
745 // minor nor major is an ancestors of the other one.)
742 // minor nor major is an ancestors of the other one.)
746 // We preserve the new
743 // We preserve the new
747 // information about the new file.
744 // information about the new file.
748 MergePick::Minor
745 MergePick::Minor
749 }
746 }
750 }
747 }
751 }
748 }
General Comments 0
You need to be logged in to leave comments. Login now