##// END OF EJS Templates
copies-rust: use the `entry` API for copy information too...
marmoute -
r46768:0a721fc4 default
parent child Browse files
Show More
@@ -1,748 +1,758 b''
1 use crate::utils::hg_path::HgPath;
1 use crate::utils::hg_path::HgPath;
2 use crate::utils::hg_path::HgPathBuf;
2 use crate::utils::hg_path::HgPathBuf;
3 use crate::Revision;
3 use crate::Revision;
4 use crate::NULL_REVISION;
4 use crate::NULL_REVISION;
5
5
6 use im_rc::ordmap::DiffItem;
6 use im_rc::ordmap::DiffItem;
7 use im_rc::ordmap::Entry;
7 use im_rc::ordmap::OrdMap;
8 use im_rc::ordmap::OrdMap;
8
9
9 use std::cmp::Ordering;
10 use std::cmp::Ordering;
10 use std::collections::HashMap;
11 use std::collections::HashMap;
11 use std::convert::TryInto;
12 use std::convert::TryInto;
12
13
13 pub type PathCopies = HashMap<HgPathBuf, HgPathBuf>;
14 pub type PathCopies = HashMap<HgPathBuf, HgPathBuf>;
14
15
15 type PathToken = usize;
16 type PathToken = usize;
16
17
17 #[derive(Clone, Debug, PartialEq, Copy)]
18 #[derive(Clone, Debug, PartialEq, Copy)]
18 struct TimeStampedPathCopy {
19 struct TimeStampedPathCopy {
19 /// revision at which the copy information was added
20 /// revision at which the copy information was added
20 rev: Revision,
21 rev: Revision,
21 /// the copy source, (Set to None in case of deletion of the associated
22 /// the copy source, (Set to None in case of deletion of the associated
22 /// key)
23 /// key)
23 path: Option<PathToken>,
24 path: Option<PathToken>,
24 }
25 }
25
26
26 /// maps CopyDestination to Copy Source (+ a "timestamp" for the operation)
27 /// maps CopyDestination to Copy Source (+ a "timestamp" for the operation)
27 type TimeStampedPathCopies = OrdMap<PathToken, TimeStampedPathCopy>;
28 type TimeStampedPathCopies = OrdMap<PathToken, TimeStampedPathCopy>;
28
29
29 /// hold parent 1, parent 2 and relevant files actions.
30 /// hold parent 1, parent 2 and relevant files actions.
30 pub type RevInfo<'a> = (Revision, Revision, ChangedFiles<'a>);
31 pub type RevInfo<'a> = (Revision, Revision, ChangedFiles<'a>);
31
32
/// Represent the files affected by a changeset.
///
/// This holds a subset of mercurial.metadata.ChangingFiles as we do not need
/// all the data categories tracked by it.
pub struct ChangedFiles<'a> {
    /// number of entries recorded in `index`
    nb_items: u32,
    /// fixed-size index entries (one flag byte + two big-endian u32
    /// offsets per entry, `ENTRY_SIZE` bytes each)
    index: &'a [u8],
    /// concatenated variable-size payload (the filenames) addressed by
    /// the offsets stored in `index`
    data: &'a [u8],
}
43
44
44 /// Represent active changes that affect the copy tracing.
45 /// Represent active changes that affect the copy tracing.
45 enum Action<'a> {
46 enum Action<'a> {
46 /// The parent ? children edge is removing a file
47 /// The parent ? children edge is removing a file
47 ///
48 ///
48 /// (actually, this could be the edge from the other parent, but it does
49 /// (actually, this could be the edge from the other parent, but it does
49 /// not matters)
50 /// not matters)
50 Removed(&'a HgPath),
51 Removed(&'a HgPath),
51 /// The parent ? children edge introduce copy information between (dest,
52 /// The parent ? children edge introduce copy information between (dest,
52 /// source)
53 /// source)
53 Copied(&'a HgPath, &'a HgPath),
54 Copied(&'a HgPath, &'a HgPath),
54 }
55 }
55
56
/// The possible "special" cases a file can be in during a merge.
///
/// See mercurial/metadata.py for details on these values.
#[derive(PartialEq)]
enum MergeCase {
    /// Merged: the file had history on both sides that needed merging.
    Merged,
    /// Salvaged: the file was a candidate for deletion but survived the
    /// merge.
    Salvaged,
    /// Normal: neither of the two cases above.
    Normal,
}
68
69
69 type FileChange<'a> = (u8, &'a HgPath, &'a HgPath);
70 type FileChange<'a> = (u8, &'a HgPath, &'a HgPath);
70
71
71 const EMPTY: &[u8] = b"";
72 const EMPTY: &[u8] = b"";
72 const COPY_MASK: u8 = 3;
73 const COPY_MASK: u8 = 3;
73 const P1_COPY: u8 = 2;
74 const P1_COPY: u8 = 2;
74 const P2_COPY: u8 = 3;
75 const P2_COPY: u8 = 3;
75 const ACTION_MASK: u8 = 28;
76 const ACTION_MASK: u8 = 28;
76 const REMOVED: u8 = 12;
77 const REMOVED: u8 = 12;
77 const MERGED: u8 = 8;
78 const MERGED: u8 = 8;
78 const SALVAGED: u8 = 16;
79 const SALVAGED: u8 = 16;
79
80
80 impl<'a> ChangedFiles<'a> {
81 impl<'a> ChangedFiles<'a> {
81 const INDEX_START: usize = 4;
82 const INDEX_START: usize = 4;
82 const ENTRY_SIZE: u32 = 9;
83 const ENTRY_SIZE: u32 = 9;
83 const FILENAME_START: u32 = 1;
84 const FILENAME_START: u32 = 1;
84 const COPY_SOURCE_START: u32 = 5;
85 const COPY_SOURCE_START: u32 = 5;
85
86
86 pub fn new(data: &'a [u8]) -> Self {
87 pub fn new(data: &'a [u8]) -> Self {
87 assert!(
88 assert!(
88 data.len() >= 4,
89 data.len() >= 4,
89 "data size ({}) is too small to contain the header (4)",
90 "data size ({}) is too small to contain the header (4)",
90 data.len()
91 data.len()
91 );
92 );
92 let nb_items_raw: [u8; 4] = (&data[0..=3])
93 let nb_items_raw: [u8; 4] = (&data[0..=3])
93 .try_into()
94 .try_into()
94 .expect("failed to turn 4 bytes into 4 bytes");
95 .expect("failed to turn 4 bytes into 4 bytes");
95 let nb_items = u32::from_be_bytes(nb_items_raw);
96 let nb_items = u32::from_be_bytes(nb_items_raw);
96
97
97 let index_size = (nb_items * Self::ENTRY_SIZE) as usize;
98 let index_size = (nb_items * Self::ENTRY_SIZE) as usize;
98 let index_end = Self::INDEX_START + index_size;
99 let index_end = Self::INDEX_START + index_size;
99
100
100 assert!(
101 assert!(
101 data.len() >= index_end,
102 data.len() >= index_end,
102 "data size ({}) is too small to fit the index_data ({})",
103 "data size ({}) is too small to fit the index_data ({})",
103 data.len(),
104 data.len(),
104 index_end
105 index_end
105 );
106 );
106
107
107 let ret = ChangedFiles {
108 let ret = ChangedFiles {
108 nb_items,
109 nb_items,
109 index: &data[Self::INDEX_START..index_end],
110 index: &data[Self::INDEX_START..index_end],
110 data: &data[index_end..],
111 data: &data[index_end..],
111 };
112 };
112 let max_data = ret.filename_end(nb_items - 1) as usize;
113 let max_data = ret.filename_end(nb_items - 1) as usize;
113 assert!(
114 assert!(
114 ret.data.len() >= max_data,
115 ret.data.len() >= max_data,
115 "data size ({}) is too small to fit all data ({})",
116 "data size ({}) is too small to fit all data ({})",
116 data.len(),
117 data.len(),
117 index_end + max_data
118 index_end + max_data
118 );
119 );
119 ret
120 ret
120 }
121 }
121
122
122 pub fn new_empty() -> Self {
123 pub fn new_empty() -> Self {
123 ChangedFiles {
124 ChangedFiles {
124 nb_items: 0,
125 nb_items: 0,
125 index: EMPTY,
126 index: EMPTY,
126 data: EMPTY,
127 data: EMPTY,
127 }
128 }
128 }
129 }
129
130
130 /// internal function to return an individual entry at a given index
131 /// internal function to return an individual entry at a given index
131 fn entry(&'a self, idx: u32) -> FileChange<'a> {
132 fn entry(&'a self, idx: u32) -> FileChange<'a> {
132 if idx >= self.nb_items {
133 if idx >= self.nb_items {
133 panic!(
134 panic!(
134 "index for entry is higher that the number of file {} >= {}",
135 "index for entry is higher that the number of file {} >= {}",
135 idx, self.nb_items
136 idx, self.nb_items
136 )
137 )
137 }
138 }
138 let flags = self.flags(idx);
139 let flags = self.flags(idx);
139 let filename = self.filename(idx);
140 let filename = self.filename(idx);
140 let copy_idx = self.copy_idx(idx);
141 let copy_idx = self.copy_idx(idx);
141 let copy_source = self.filename(copy_idx);
142 let copy_source = self.filename(copy_idx);
142 (flags, filename, copy_source)
143 (flags, filename, copy_source)
143 }
144 }
144
145
145 /// internal function to return the filename of the entry at a given index
146 /// internal function to return the filename of the entry at a given index
146 fn filename(&self, idx: u32) -> &HgPath {
147 fn filename(&self, idx: u32) -> &HgPath {
147 let filename_start;
148 let filename_start;
148 if idx == 0 {
149 if idx == 0 {
149 filename_start = 0;
150 filename_start = 0;
150 } else {
151 } else {
151 filename_start = self.filename_end(idx - 1)
152 filename_start = self.filename_end(idx - 1)
152 }
153 }
153 let filename_end = self.filename_end(idx);
154 let filename_end = self.filename_end(idx);
154 let filename_start = filename_start as usize;
155 let filename_start = filename_start as usize;
155 let filename_end = filename_end as usize;
156 let filename_end = filename_end as usize;
156 HgPath::new(&self.data[filename_start..filename_end])
157 HgPath::new(&self.data[filename_start..filename_end])
157 }
158 }
158
159
159 /// internal function to return the flag field of the entry at a given
160 /// internal function to return the flag field of the entry at a given
160 /// index
161 /// index
161 fn flags(&self, idx: u32) -> u8 {
162 fn flags(&self, idx: u32) -> u8 {
162 let idx = idx as usize;
163 let idx = idx as usize;
163 self.index[idx * (Self::ENTRY_SIZE as usize)]
164 self.index[idx * (Self::ENTRY_SIZE as usize)]
164 }
165 }
165
166
166 /// internal function to return the end of a filename part at a given index
167 /// internal function to return the end of a filename part at a given index
167 fn filename_end(&self, idx: u32) -> u32 {
168 fn filename_end(&self, idx: u32) -> u32 {
168 let start = (idx * Self::ENTRY_SIZE) + Self::FILENAME_START;
169 let start = (idx * Self::ENTRY_SIZE) + Self::FILENAME_START;
169 let end = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
170 let end = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
170 let start = start as usize;
171 let start = start as usize;
171 let end = end as usize;
172 let end = end as usize;
172 let raw = (&self.index[start..end])
173 let raw = (&self.index[start..end])
173 .try_into()
174 .try_into()
174 .expect("failed to turn 4 bytes into 4 bytes");
175 .expect("failed to turn 4 bytes into 4 bytes");
175 u32::from_be_bytes(raw)
176 u32::from_be_bytes(raw)
176 }
177 }
177
178
178 /// internal function to return index of the copy source of the entry at a
179 /// internal function to return index of the copy source of the entry at a
179 /// given index
180 /// given index
180 fn copy_idx(&self, idx: u32) -> u32 {
181 fn copy_idx(&self, idx: u32) -> u32 {
181 let start = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
182 let start = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
182 let end = (idx + 1) * Self::ENTRY_SIZE;
183 let end = (idx + 1) * Self::ENTRY_SIZE;
183 let start = start as usize;
184 let start = start as usize;
184 let end = end as usize;
185 let end = end as usize;
185 let raw = (&self.index[start..end])
186 let raw = (&self.index[start..end])
186 .try_into()
187 .try_into()
187 .expect("failed to turn 4 bytes into 4 bytes");
188 .expect("failed to turn 4 bytes into 4 bytes");
188 u32::from_be_bytes(raw)
189 u32::from_be_bytes(raw)
189 }
190 }
190
191
191 /// Return an iterator over all the `Action` in this instance.
192 /// Return an iterator over all the `Action` in this instance.
192 fn iter_actions(&self, parent: Parent) -> ActionsIterator {
193 fn iter_actions(&self, parent: Parent) -> ActionsIterator {
193 ActionsIterator {
194 ActionsIterator {
194 changes: &self,
195 changes: &self,
195 parent: parent,
196 parent: parent,
196 current: 0,
197 current: 0,
197 }
198 }
198 }
199 }
199
200
200 /// return the MergeCase value associated with a filename
201 /// return the MergeCase value associated with a filename
201 fn get_merge_case(&self, path: &HgPath) -> MergeCase {
202 fn get_merge_case(&self, path: &HgPath) -> MergeCase {
202 if self.nb_items == 0 {
203 if self.nb_items == 0 {
203 return MergeCase::Normal;
204 return MergeCase::Normal;
204 }
205 }
205 let mut low_part = 0;
206 let mut low_part = 0;
206 let mut high_part = self.nb_items;
207 let mut high_part = self.nb_items;
207
208
208 while low_part < high_part {
209 while low_part < high_part {
209 let cursor = (low_part + high_part - 1) / 2;
210 let cursor = (low_part + high_part - 1) / 2;
210 let (flags, filename, _source) = self.entry(cursor);
211 let (flags, filename, _source) = self.entry(cursor);
211 match path.cmp(filename) {
212 match path.cmp(filename) {
212 Ordering::Less => low_part = cursor + 1,
213 Ordering::Less => low_part = cursor + 1,
213 Ordering::Greater => high_part = cursor,
214 Ordering::Greater => high_part = cursor,
214 Ordering::Equal => {
215 Ordering::Equal => {
215 return match flags & ACTION_MASK {
216 return match flags & ACTION_MASK {
216 MERGED => MergeCase::Merged,
217 MERGED => MergeCase::Merged,
217 SALVAGED => MergeCase::Salvaged,
218 SALVAGED => MergeCase::Salvaged,
218 _ => MergeCase::Normal,
219 _ => MergeCase::Normal,
219 };
220 };
220 }
221 }
221 }
222 }
222 }
223 }
223 MergeCase::Normal
224 MergeCase::Normal
224 }
225 }
225 }
226 }
226
227
227 /// A struct responsible for answering "is X ancestors of Y" quickly
228 /// A struct responsible for answering "is X ancestors of Y" quickly
228 ///
229 ///
229 /// The structure will delegate ancestors call to a callback, and cache the
230 /// The structure will delegate ancestors call to a callback, and cache the
230 /// result.
231 /// result.
231 #[derive(Debug)]
232 #[derive(Debug)]
232 struct AncestorOracle<'a, A: Fn(Revision, Revision) -> bool> {
233 struct AncestorOracle<'a, A: Fn(Revision, Revision) -> bool> {
233 inner: &'a A,
234 inner: &'a A,
234 pairs: HashMap<(Revision, Revision), bool>,
235 pairs: HashMap<(Revision, Revision), bool>,
235 }
236 }
236
237
237 impl<'a, A: Fn(Revision, Revision) -> bool> AncestorOracle<'a, A> {
238 impl<'a, A: Fn(Revision, Revision) -> bool> AncestorOracle<'a, A> {
238 fn new(func: &'a A) -> Self {
239 fn new(func: &'a A) -> Self {
239 Self {
240 Self {
240 inner: func,
241 inner: func,
241 pairs: HashMap::default(),
242 pairs: HashMap::default(),
242 }
243 }
243 }
244 }
244
245
245 /// returns `true` if `anc` is an ancestors of `desc`, `false` otherwise
246 /// returns `true` if `anc` is an ancestors of `desc`, `false` otherwise
246 fn is_ancestor(&mut self, anc: Revision, desc: Revision) -> bool {
247 fn is_ancestor(&mut self, anc: Revision, desc: Revision) -> bool {
247 if anc > desc {
248 if anc > desc {
248 false
249 false
249 } else if anc == desc {
250 } else if anc == desc {
250 true
251 true
251 } else {
252 } else {
252 if let Some(b) = self.pairs.get(&(anc, desc)) {
253 if let Some(b) = self.pairs.get(&(anc, desc)) {
253 *b
254 *b
254 } else {
255 } else {
255 let b = (self.inner)(anc, desc);
256 let b = (self.inner)(anc, desc);
256 self.pairs.insert((anc, desc), b);
257 self.pairs.insert((anc, desc), b);
257 b
258 b
258 }
259 }
259 }
260 }
260 }
261 }
261 }
262 }
262
263
263 struct ActionsIterator<'a> {
264 struct ActionsIterator<'a> {
264 changes: &'a ChangedFiles<'a>,
265 changes: &'a ChangedFiles<'a>,
265 parent: Parent,
266 parent: Parent,
266 current: u32,
267 current: u32,
267 }
268 }
268
269
269 impl<'a> Iterator for ActionsIterator<'a> {
270 impl<'a> Iterator for ActionsIterator<'a> {
270 type Item = Action<'a>;
271 type Item = Action<'a>;
271
272
272 fn next(&mut self) -> Option<Action<'a>> {
273 fn next(&mut self) -> Option<Action<'a>> {
273 let copy_flag = match self.parent {
274 let copy_flag = match self.parent {
274 Parent::FirstParent => P1_COPY,
275 Parent::FirstParent => P1_COPY,
275 Parent::SecondParent => P2_COPY,
276 Parent::SecondParent => P2_COPY,
276 };
277 };
277 while self.current < self.changes.nb_items {
278 while self.current < self.changes.nb_items {
278 let (flags, file, source) = self.changes.entry(self.current);
279 let (flags, file, source) = self.changes.entry(self.current);
279 self.current += 1;
280 self.current += 1;
280 if (flags & ACTION_MASK) == REMOVED {
281 if (flags & ACTION_MASK) == REMOVED {
281 return Some(Action::Removed(file));
282 return Some(Action::Removed(file));
282 }
283 }
283 let copy = flags & COPY_MASK;
284 let copy = flags & COPY_MASK;
284 if copy == copy_flag {
285 if copy == copy_flag {
285 return Some(Action::Copied(file, source));
286 return Some(Action::Copied(file, source));
286 }
287 }
287 }
288 }
288 return None;
289 return None;
289 }
290 }
290 }
291 }
291
292
/// A small struct whose purpose is to ensure the lifetime of the bytes
/// referenced by a `ChangedFiles`.
///
/// It is passed to the `RevInfoMaker` callback, which may assign any
/// necessary owned content to the `data` attribute. The copy-tracing code
/// keeps the `DataHolder` alive at least as long as the `ChangedFiles`
/// borrowing from it.
pub struct DataHolder<D> {
    /// The `RevInfoMaker` callback should store here whatever owned data
    /// the `ChangedFiles` it returns borrows from; the holder outlives
    /// that `ChangedFiles`.
    pub data: Option<D>,
}
304
305
305 pub type RevInfoMaker<'a, D> =
306 pub type RevInfoMaker<'a, D> =
306 Box<dyn for<'r> Fn(Revision, &'r mut DataHolder<D>) -> RevInfo<'r> + 'a>;
307 Box<dyn for<'r> Fn(Revision, &'r mut DataHolder<D>) -> RevInfo<'r> + 'a>;
307
308
/// Identifies which parent → child edge is currently being processed.
#[derive(Copy, Clone, Debug)]
enum Parent {
    /// The `p1(x) → x` edge.
    FirstParent,
    /// The `p2(x) → x` edge.
    SecondParent,
}
316
317
317 /// A small "tokenizer" responsible of turning full HgPath into lighter
318 /// A small "tokenizer" responsible of turning full HgPath into lighter
318 /// PathToken
319 /// PathToken
319 ///
320 ///
320 /// Dealing with small object, like integer is much faster, so HgPath input are
321 /// Dealing with small object, like integer is much faster, so HgPath input are
321 /// turned into integer "PathToken" and converted back in the end.
322 /// turned into integer "PathToken" and converted back in the end.
322 #[derive(Clone, Debug, Default)]
323 #[derive(Clone, Debug, Default)]
323 struct TwoWayPathMap {
324 struct TwoWayPathMap {
324 token: HashMap<HgPathBuf, PathToken>,
325 token: HashMap<HgPathBuf, PathToken>,
325 path: Vec<HgPathBuf>,
326 path: Vec<HgPathBuf>,
326 }
327 }
327
328
328 impl TwoWayPathMap {
329 impl TwoWayPathMap {
329 fn tokenize(&mut self, path: &HgPath) -> PathToken {
330 fn tokenize(&mut self, path: &HgPath) -> PathToken {
330 match self.token.get(path) {
331 match self.token.get(path) {
331 Some(a) => *a,
332 Some(a) => *a,
332 None => {
333 None => {
333 let a = self.token.len();
334 let a = self.token.len();
334 let buf = path.to_owned();
335 let buf = path.to_owned();
335 self.path.push(buf.clone());
336 self.path.push(buf.clone());
336 self.token.insert(buf, a);
337 self.token.insert(buf, a);
337 a
338 a
338 }
339 }
339 }
340 }
340 }
341 }
341
342
342 fn untokenize(&self, token: PathToken) -> &HgPathBuf {
343 fn untokenize(&self, token: PathToken) -> &HgPathBuf {
343 assert!(token < self.path.len(), format!("Unknown token: {}", token));
344 assert!(token < self.path.len(), format!("Unknown token: {}", token));
344 &self.path[token]
345 &self.path[token]
345 }
346 }
346 }
347 }
347
348
348 /// Same as mercurial.copies._combine_changeset_copies, but in Rust.
349 /// Same as mercurial.copies._combine_changeset_copies, but in Rust.
349 ///
350 ///
350 /// Arguments are:
351 /// Arguments are:
351 ///
352 ///
352 /// revs: all revisions to be considered
353 /// revs: all revisions to be considered
353 /// children: a {parent ? [childrens]} mapping
354 /// children: a {parent ? [childrens]} mapping
354 /// target_rev: the final revision we are combining copies to
355 /// target_rev: the final revision we are combining copies to
355 /// rev_info(rev): callback to get revision information:
356 /// rev_info(rev): callback to get revision information:
356 /// * first parent
357 /// * first parent
357 /// * second parent
358 /// * second parent
358 /// * ChangedFiles
359 /// * ChangedFiles
359 /// isancestors(low_rev, high_rev): callback to check if a revision is an
360 /// isancestors(low_rev, high_rev): callback to check if a revision is an
360 /// ancestor of another
361 /// ancestor of another
361 pub fn combine_changeset_copies<A: Fn(Revision, Revision) -> bool, D>(
362 pub fn combine_changeset_copies<A: Fn(Revision, Revision) -> bool, D>(
362 revs: Vec<Revision>,
363 revs: Vec<Revision>,
363 mut children_count: HashMap<Revision, usize>,
364 mut children_count: HashMap<Revision, usize>,
364 target_rev: Revision,
365 target_rev: Revision,
365 rev_info: RevInfoMaker<D>,
366 rev_info: RevInfoMaker<D>,
366 is_ancestor: &A,
367 is_ancestor: &A,
367 ) -> PathCopies {
368 ) -> PathCopies {
368 let mut all_copies = HashMap::new();
369 let mut all_copies = HashMap::new();
369 let mut oracle = AncestorOracle::new(is_ancestor);
370 let mut oracle = AncestorOracle::new(is_ancestor);
370
371
371 let mut path_map = TwoWayPathMap::default();
372 let mut path_map = TwoWayPathMap::default();
372
373
373 for rev in revs {
374 for rev in revs {
374 let mut d: DataHolder<D> = DataHolder { data: None };
375 let mut d: DataHolder<D> = DataHolder { data: None };
375 let (p1, p2, changes) = rev_info(rev, &mut d);
376 let (p1, p2, changes) = rev_info(rev, &mut d);
376
377
377 // We will chain the copies information accumulated for the parent with
378 // We will chain the copies information accumulated for the parent with
378 // the individual copies information the curent revision. Creating a
379 // the individual copies information the curent revision. Creating a
379 // new TimeStampedPath for each `rev` β†’ `children` vertex.
380 // new TimeStampedPath for each `rev` β†’ `children` vertex.
380 let mut copies: Option<TimeStampedPathCopies> = None;
381 let mut copies: Option<TimeStampedPathCopies> = None;
381 if p1 != NULL_REVISION {
382 if p1 != NULL_REVISION {
382 // Retrieve data computed in a previous iteration
383 // Retrieve data computed in a previous iteration
383 let parent_copies = get_and_clean_parent_copies(
384 let parent_copies = get_and_clean_parent_copies(
384 &mut all_copies,
385 &mut all_copies,
385 &mut children_count,
386 &mut children_count,
386 p1,
387 p1,
387 );
388 );
388 if let Some(parent_copies) = parent_copies {
389 if let Some(parent_copies) = parent_copies {
389 // combine it with data for that revision
390 // combine it with data for that revision
390 let vertex_copies = add_from_changes(
391 let vertex_copies = add_from_changes(
391 &mut path_map,
392 &mut path_map,
392 &parent_copies,
393 &parent_copies,
393 &changes,
394 &changes,
394 Parent::FirstParent,
395 Parent::FirstParent,
395 rev,
396 rev,
396 );
397 );
397 // keep that data around for potential later combination
398 // keep that data around for potential later combination
398 copies = Some(vertex_copies);
399 copies = Some(vertex_copies);
399 }
400 }
400 }
401 }
401 if p2 != NULL_REVISION {
402 if p2 != NULL_REVISION {
402 // Retrieve data computed in a previous iteration
403 // Retrieve data computed in a previous iteration
403 let parent_copies = get_and_clean_parent_copies(
404 let parent_copies = get_and_clean_parent_copies(
404 &mut all_copies,
405 &mut all_copies,
405 &mut children_count,
406 &mut children_count,
406 p2,
407 p2,
407 );
408 );
408 if let Some(parent_copies) = parent_copies {
409 if let Some(parent_copies) = parent_copies {
409 // combine it with data for that revision
410 // combine it with data for that revision
410 let vertex_copies = add_from_changes(
411 let vertex_copies = add_from_changes(
411 &mut path_map,
412 &mut path_map,
412 &parent_copies,
413 &parent_copies,
413 &changes,
414 &changes,
414 Parent::SecondParent,
415 Parent::SecondParent,
415 rev,
416 rev,
416 );
417 );
417
418
418 copies = match copies {
419 copies = match copies {
419 None => Some(vertex_copies),
420 None => Some(vertex_copies),
420 // Merge has two parents needs to combines their copy
421 // Merge has two parents needs to combines their copy
421 // information.
422 // information.
422 //
423 //
423 // If we got data from both parents, We need to combine
424 // If we got data from both parents, We need to combine
424 // them.
425 // them.
425 Some(copies) => Some(merge_copies_dict(
426 Some(copies) => Some(merge_copies_dict(
426 &path_map,
427 &path_map,
427 vertex_copies,
428 vertex_copies,
428 copies,
429 copies,
429 &changes,
430 &changes,
430 &mut oracle,
431 &mut oracle,
431 )),
432 )),
432 };
433 };
433 }
434 }
434 }
435 }
435 match copies {
436 match copies {
436 Some(copies) => {
437 Some(copies) => {
437 all_copies.insert(rev, copies);
438 all_copies.insert(rev, copies);
438 }
439 }
439 _ => {}
440 _ => {}
440 }
441 }
441 }
442 }
442
443
443 // Drop internal information (like the timestamp) and return the final
444 // Drop internal information (like the timestamp) and return the final
444 // mapping.
445 // mapping.
445 let tt_result = all_copies
446 let tt_result = all_copies
446 .remove(&target_rev)
447 .remove(&target_rev)
447 .expect("target revision was not processed");
448 .expect("target revision was not processed");
448 let mut result = PathCopies::default();
449 let mut result = PathCopies::default();
449 for (dest, tt_source) in tt_result {
450 for (dest, tt_source) in tt_result {
450 if let Some(path) = tt_source.path {
451 if let Some(path) = tt_source.path {
451 let path_dest = path_map.untokenize(dest).to_owned();
452 let path_dest = path_map.untokenize(dest).to_owned();
452 let path_path = path_map.untokenize(path).to_owned();
453 let path_path = path_map.untokenize(path).to_owned();
453 result.insert(path_dest, path_path);
454 result.insert(path_dest, path_path);
454 }
455 }
455 }
456 }
456 result
457 result
457 }
458 }
458
459
459 /// fetch previous computed information
460 /// fetch previous computed information
460 ///
461 ///
461 /// If no other children are expected to need this information, we drop it from
462 /// If no other children are expected to need this information, we drop it from
462 /// the cache.
463 /// the cache.
463 ///
464 ///
464 /// If parent is not part of the set we are expected to walk, return None.
465 /// If parent is not part of the set we are expected to walk, return None.
465 fn get_and_clean_parent_copies(
466 fn get_and_clean_parent_copies(
466 all_copies: &mut HashMap<Revision, TimeStampedPathCopies>,
467 all_copies: &mut HashMap<Revision, TimeStampedPathCopies>,
467 children_count: &mut HashMap<Revision, usize>,
468 children_count: &mut HashMap<Revision, usize>,
468 parent_rev: Revision,
469 parent_rev: Revision,
469 ) -> Option<TimeStampedPathCopies> {
470 ) -> Option<TimeStampedPathCopies> {
470 let count = children_count.get_mut(&parent_rev)?;
471 let count = children_count.get_mut(&parent_rev)?;
471 *count -= 1;
472 *count -= 1;
472 if *count == 0 {
473 if *count == 0 {
473 match all_copies.remove(&parent_rev) {
474 match all_copies.remove(&parent_rev) {
474 Some(c) => Some(c),
475 Some(c) => Some(c),
475 None => Some(TimeStampedPathCopies::default()),
476 None => Some(TimeStampedPathCopies::default()),
476 }
477 }
477 } else {
478 } else {
478 match all_copies.get(&parent_rev) {
479 match all_copies.get(&parent_rev) {
479 Some(c) => Some(c.clone()),
480 Some(c) => Some(c.clone()),
480 None => Some(TimeStampedPathCopies::default()),
481 None => Some(TimeStampedPathCopies::default()),
481 }
482 }
482 }
483 }
483 }
484 }
484
485
485 /// Combine ChangedFiles with some existing PathCopies information and return
486 /// Combine ChangedFiles with some existing PathCopies information and return
486 /// the result
487 /// the result
487 fn add_from_changes(
488 fn add_from_changes(
488 path_map: &mut TwoWayPathMap,
489 path_map: &mut TwoWayPathMap,
489 base_copies: &TimeStampedPathCopies,
490 base_copies: &TimeStampedPathCopies,
490 changes: &ChangedFiles,
491 changes: &ChangedFiles,
491 parent: Parent,
492 parent: Parent,
492 current_rev: Revision,
493 current_rev: Revision,
493 ) -> TimeStampedPathCopies {
494 ) -> TimeStampedPathCopies {
494 let mut copies = base_copies.clone();
495 let mut copies = base_copies.clone();
495 for action in changes.iter_actions(parent) {
496 for action in changes.iter_actions(parent) {
496 match action {
497 match action {
497 Action::Copied(path_dest, path_source) => {
498 Action::Copied(path_dest, path_source) => {
498 let dest = path_map.tokenize(path_dest);
499 let dest = path_map.tokenize(path_dest);
499 let source = path_map.tokenize(path_source);
500 let source = path_map.tokenize(path_source);
500 let entry;
501 let entry;
501 if let Some(v) = base_copies.get(&source) {
502 if let Some(v) = base_copies.get(&source) {
502 entry = match &v.path {
503 entry = match &v.path {
503 Some(path) => Some((*(path)).to_owned()),
504 Some(path) => Some((*(path)).to_owned()),
504 None => Some(source.to_owned()),
505 None => Some(source.to_owned()),
505 }
506 }
506 } else {
507 } else {
507 entry = Some(source.to_owned());
508 entry = Some(source.to_owned());
508 }
509 }
509 // Each new entry is introduced by the children, we
510 // Each new entry is introduced by the children, we
510 // record this information as we will need it to take
511 // record this information as we will need it to take
511 // the right decision when merging conflicting copy
512 // the right decision when merging conflicting copy
512 // information. See merge_copies_dict for details.
513 // information. See merge_copies_dict for details.
514 match copies.entry(dest) {
515 Entry::Vacant(slot) => {
513 let ttpc = TimeStampedPathCopy {
516 let ttpc = TimeStampedPathCopy {
514 rev: current_rev,
517 rev: current_rev,
515 path: entry,
518 path: entry,
516 };
519 };
517 copies.insert(dest.to_owned(), ttpc);
520 slot.insert(ttpc);
521 }
522 Entry::Occupied(mut slot) => {
523 let mut ttpc = slot.get_mut();
524 ttpc.rev = current_rev;
525 ttpc.path = entry;
526 }
527 }
518 }
528 }
519 Action::Removed(deleted_path) => {
529 Action::Removed(deleted_path) => {
520 // We must drop copy information for removed file.
530 // We must drop copy information for removed file.
521 //
531 //
522 // We need to explicitly record them as dropped to
532 // We need to explicitly record them as dropped to
523 // propagate this information when merging two
533 // propagate this information when merging two
524 // TimeStampedPathCopies object.
534 // TimeStampedPathCopies object.
525 let deleted = path_map.tokenize(deleted_path);
535 let deleted = path_map.tokenize(deleted_path);
526 copies.entry(deleted).and_modify(|old| {
536 copies.entry(deleted).and_modify(|old| {
527 old.rev = current_rev;
537 old.rev = current_rev;
528 old.path = None;
538 old.path = None;
529 });
539 });
530 }
540 }
531 }
541 }
532 }
542 }
533 copies
543 copies
534 }
544 }
535
545
536 /// merge two copies-mapping together, minor and major
546 /// merge two copies-mapping together, minor and major
537 ///
547 ///
538 /// In case of conflict, value from "major" will be picked, unless in some
548 /// In case of conflict, value from "major" will be picked, unless in some
539 /// cases. See inline documentation for details.
549 /// cases. See inline documentation for details.
540 fn merge_copies_dict<A: Fn(Revision, Revision) -> bool>(
550 fn merge_copies_dict<A: Fn(Revision, Revision) -> bool>(
541 path_map: &TwoWayPathMap,
551 path_map: &TwoWayPathMap,
542 mut minor: TimeStampedPathCopies,
552 mut minor: TimeStampedPathCopies,
543 mut major: TimeStampedPathCopies,
553 mut major: TimeStampedPathCopies,
544 changes: &ChangedFiles,
554 changes: &ChangedFiles,
545 oracle: &mut AncestorOracle<A>,
555 oracle: &mut AncestorOracle<A>,
546 ) -> TimeStampedPathCopies {
556 ) -> TimeStampedPathCopies {
547 // This closure exist as temporary help while multiple developper are
557 // This closure exist as temporary help while multiple developper are
548 // actively working on this code. Feel free to re-inline it once this
558 // actively working on this code. Feel free to re-inline it once this
549 // code is more settled.
559 // code is more settled.
550 let mut cmp_value =
560 let mut cmp_value =
551 |dest: &PathToken,
561 |dest: &PathToken,
552 src_minor: &TimeStampedPathCopy,
562 src_minor: &TimeStampedPathCopy,
553 src_major: &TimeStampedPathCopy| {
563 src_major: &TimeStampedPathCopy| {
554 compare_value(
564 compare_value(
555 path_map, changes, oracle, dest, src_minor, src_major,
565 path_map, changes, oracle, dest, src_minor, src_major,
556 )
566 )
557 };
567 };
558 if minor.is_empty() {
568 if minor.is_empty() {
559 major
569 major
560 } else if major.is_empty() {
570 } else if major.is_empty() {
561 minor
571 minor
562 } else if minor.len() * 2 < major.len() {
572 } else if minor.len() * 2 < major.len() {
563 // Lets says we are merging two TimeStampedPathCopies instance A and B.
573 // Lets says we are merging two TimeStampedPathCopies instance A and B.
564 //
574 //
565 // If A contains N items, the merge result will never contains more
575 // If A contains N items, the merge result will never contains more
566 // than N values differents than the one in A
576 // than N values differents than the one in A
567 //
577 //
568 // If B contains M items, with M > N, the merge result will always
578 // If B contains M items, with M > N, the merge result will always
569 // result in a minimum of M - N value differents than the on in
579 // result in a minimum of M - N value differents than the on in
570 // A
580 // A
571 //
581 //
572 // As a result, if N < (M-N), we know that simply iterating over A will
582 // As a result, if N < (M-N), we know that simply iterating over A will
573 // yield less difference than iterating over the difference
583 // yield less difference than iterating over the difference
574 // between A and B.
584 // between A and B.
575 //
585 //
576 // This help performance a lot in case were a tiny
586 // This help performance a lot in case were a tiny
577 // TimeStampedPathCopies is merged with a much larger one.
587 // TimeStampedPathCopies is merged with a much larger one.
578 for (dest, src_minor) in minor {
588 for (dest, src_minor) in minor {
579 let src_major = major.get(&dest);
589 let src_major = major.get(&dest);
580 match src_major {
590 match src_major {
581 None => major.insert(dest, src_minor),
591 None => major.insert(dest, src_minor),
582 Some(src_major) => {
592 Some(src_major) => {
583 match cmp_value(&dest, &src_minor, src_major) {
593 match cmp_value(&dest, &src_minor, src_major) {
584 MergePick::Any | MergePick::Major => None,
594 MergePick::Any | MergePick::Major => None,
585 MergePick::Minor => major.insert(dest, src_minor),
595 MergePick::Minor => major.insert(dest, src_minor),
586 }
596 }
587 }
597 }
588 };
598 };
589 }
599 }
590 major
600 major
591 } else if major.len() * 2 < minor.len() {
601 } else if major.len() * 2 < minor.len() {
592 // This use the same rational than the previous block.
602 // This use the same rational than the previous block.
593 // (Check previous block documentation for details.)
603 // (Check previous block documentation for details.)
594 for (dest, src_major) in major {
604 for (dest, src_major) in major {
595 let src_minor = minor.get(&dest);
605 let src_minor = minor.get(&dest);
596 match src_minor {
606 match src_minor {
597 None => minor.insert(dest, src_major),
607 None => minor.insert(dest, src_major),
598 Some(src_minor) => {
608 Some(src_minor) => {
599 match cmp_value(&dest, src_minor, &src_major) {
609 match cmp_value(&dest, src_minor, &src_major) {
600 MergePick::Any | MergePick::Minor => None,
610 MergePick::Any | MergePick::Minor => None,
601 MergePick::Major => minor.insert(dest, src_major),
611 MergePick::Major => minor.insert(dest, src_major),
602 }
612 }
603 }
613 }
604 };
614 };
605 }
615 }
606 minor
616 minor
607 } else {
617 } else {
608 let mut override_minor = Vec::new();
618 let mut override_minor = Vec::new();
609 let mut override_major = Vec::new();
619 let mut override_major = Vec::new();
610
620
611 let mut to_major = |k: &PathToken, v: &TimeStampedPathCopy| {
621 let mut to_major = |k: &PathToken, v: &TimeStampedPathCopy| {
612 override_major.push((k.clone(), v.clone()))
622 override_major.push((k.clone(), v.clone()))
613 };
623 };
614 let mut to_minor = |k: &PathToken, v: &TimeStampedPathCopy| {
624 let mut to_minor = |k: &PathToken, v: &TimeStampedPathCopy| {
615 override_minor.push((k.clone(), v.clone()))
625 override_minor.push((k.clone(), v.clone()))
616 };
626 };
617
627
618 // The diff function leverage detection of the identical subpart if
628 // The diff function leverage detection of the identical subpart if
619 // minor and major has some common ancestors. This make it very
629 // minor and major has some common ancestors. This make it very
620 // fast is most case.
630 // fast is most case.
621 //
631 //
622 // In case where the two map are vastly different in size, the current
632 // In case where the two map are vastly different in size, the current
623 // approach is still slowish because the iteration will iterate over
633 // approach is still slowish because the iteration will iterate over
624 // all the "exclusive" content of the larger on. This situation can be
634 // all the "exclusive" content of the larger on. This situation can be
625 // frequent when the subgraph of revision we are processing has a lot
635 // frequent when the subgraph of revision we are processing has a lot
626 // of roots. Each roots adding they own fully new map to the mix (and
636 // of roots. Each roots adding they own fully new map to the mix (and
627 // likely a small map, if the path from the root to the "main path" is
637 // likely a small map, if the path from the root to the "main path" is
628 // small.
638 // small.
629 //
639 //
630 // We could do better by detecting such situation and processing them
640 // We could do better by detecting such situation and processing them
631 // differently.
641 // differently.
632 for d in minor.diff(&major) {
642 for d in minor.diff(&major) {
633 match d {
643 match d {
634 DiffItem::Add(k, v) => to_minor(k, v),
644 DiffItem::Add(k, v) => to_minor(k, v),
635 DiffItem::Remove(k, v) => to_major(k, v),
645 DiffItem::Remove(k, v) => to_major(k, v),
636 DiffItem::Update { old, new } => {
646 DiffItem::Update { old, new } => {
637 let (dest, src_major) = new;
647 let (dest, src_major) = new;
638 let (_, src_minor) = old;
648 let (_, src_minor) = old;
639 match cmp_value(dest, src_minor, src_major) {
649 match cmp_value(dest, src_minor, src_major) {
640 MergePick::Major => to_minor(dest, src_major),
650 MergePick::Major => to_minor(dest, src_major),
641 MergePick::Minor => to_major(dest, src_minor),
651 MergePick::Minor => to_major(dest, src_minor),
642 // If the two entry are identical, no need to do
652 // If the two entry are identical, no need to do
643 // anything (but diff should not have yield them)
653 // anything (but diff should not have yield them)
644 MergePick::Any => unreachable!(),
654 MergePick::Any => unreachable!(),
645 }
655 }
646 }
656 }
647 };
657 };
648 }
658 }
649
659
650 let updates;
660 let updates;
651 let mut result;
661 let mut result;
652 if override_major.is_empty() {
662 if override_major.is_empty() {
653 result = major
663 result = major
654 } else if override_minor.is_empty() {
664 } else if override_minor.is_empty() {
655 result = minor
665 result = minor
656 } else {
666 } else {
657 if override_minor.len() < override_major.len() {
667 if override_minor.len() < override_major.len() {
658 updates = override_minor;
668 updates = override_minor;
659 result = minor;
669 result = minor;
660 } else {
670 } else {
661 updates = override_major;
671 updates = override_major;
662 result = major;
672 result = major;
663 }
673 }
664 for (k, v) in updates {
674 for (k, v) in updates {
665 result.insert(k, v);
675 result.insert(k, v);
666 }
676 }
667 }
677 }
668 result
678 result
669 }
679 }
670 }
680 }
671
681
/// represent the side that should prevail when merging two
/// TimeStampedPathCopies
///
/// `Debug` and `PartialEq` are derived so the variants can be logged and
/// compared directly in tests; this is purely additive for existing users,
/// which only `match` on the variants.
#[derive(Debug, PartialEq)]
enum MergePick {
    /// The "major" (p1) side prevails
    Major,
    /// The "minor" (p2) side prevails
    Minor,
    /// Any side could be used (because they are the same)
    Any,
}
682
692
/// decide which side prevails in case of conflicting values
///
/// Given copy information for the same destination (`dest`) coming from
/// both parents, returns which of `src_minor` (p2) / `src_major` (p1)
/// should be kept. `oracle.is_ancestor` is consulted to prefer the
/// strictly newer information when one revision is an ancestor of the
/// other, and `changes.get_merge_case` refines the decision for files
/// that were salvaged or actively merged.
///
// The branch order below is significant: several arms return the same
// value on purpose (hence the clippy allow) but their *conditions* overlap,
// so they must be evaluated in this exact sequence.
#[allow(clippy::if_same_then_else)]
fn compare_value<A: Fn(Revision, Revision) -> bool>(
    path_map: &TwoWayPathMap,
    changes: &ChangedFiles,
    oracle: &mut AncestorOracle<A>,
    dest: &PathToken,
    src_minor: &TimeStampedPathCopy,
    src_major: &TimeStampedPathCopy,
) -> MergePick {
    if src_major.path == src_minor.path {
        // we have the same value, but from other source;
        if src_major.rev == src_minor.rev {
            // If the two entry are identical, they are both valid
            MergePick::Any
        } else if oracle.is_ancestor(src_major.rev, src_minor.rev) {
            // minor was recorded after major: its info is newer, keep it
            MergePick::Minor
        } else {
            MergePick::Major
        }
    } else if src_major.rev == src_minor.rev {
        // We cannot get copy information for both p1 and p2 in the
        // same rev. So this is the same value.
        unreachable!(
            "conflict information from p1 and p2 in the same revision"
        );
    } else {
        let dest_path = path_map.untokenize(*dest);
        let action = changes.get_merge_case(dest_path);
        if src_major.path.is_none() && action == MergeCase::Salvaged {
            // If the file is "deleted" in the major side but was
            // salvaged by the merge, we keep the minor side alive
            MergePick::Minor
        } else if src_minor.path.is_none() && action == MergeCase::Salvaged {
            // If the file is "deleted" in the minor side but was
            // salvaged by the merge, unconditionally preserve the
            // major side.
            MergePick::Major
        } else if action == MergeCase::Merged {
            // If the file was actively merged, copy information
            // from each side might conflict. The major side will
            // win such conflict.
            MergePick::Major
        } else if oracle.is_ancestor(src_major.rev, src_minor.rev) {
            // If the minor side is strictly newer than the major
            // side, it should be kept.
            MergePick::Minor
        } else if src_major.path.is_some() {
            // without any special case, the "major" value win
            // over the "minor" one.
            MergePick::Major
        } else if oracle.is_ancestor(src_minor.rev, src_major.rev) {
            // the "major" rev is a direct ancestor of "minor",
            // any different value should
            // overwrite
            MergePick::Major
        } else {
            // major version is None (so the file was deleted on
            // that branch) and that branch is independent (neither
            // minor nor major is an ancestor of the other one.)
            // We preserve the new
            // information about the new file.
            MergePick::Minor
        }
    }
}
General Comments 0
You need to be logged in to leave comments. Login now