##// END OF EJS Templates
copies-rust: move the mapping merging into an else clause...
marmoute -
r46742:94300498 default
parent child Browse files
Show More
@@ -1,610 +1,611 b''
1 use crate::utils::hg_path::HgPath;
1 use crate::utils::hg_path::HgPath;
2 use crate::utils::hg_path::HgPathBuf;
2 use crate::utils::hg_path::HgPathBuf;
3 use crate::Revision;
3 use crate::Revision;
4
4
5 use im_rc::ordmap::DiffItem;
5 use im_rc::ordmap::DiffItem;
6 use im_rc::ordmap::OrdMap;
6 use im_rc::ordmap::OrdMap;
7
7
8 use std::cmp::Ordering;
8 use std::cmp::Ordering;
9 use std::collections::HashMap;
9 use std::collections::HashMap;
10 use std::convert::TryInto;
10 use std::convert::TryInto;
11
11
/// Maps a copy destination path to its copy source path.
pub type PathCopies = HashMap<HgPathBuf, HgPathBuf>;

/// A copy-source entry annotated with the revision that introduced it.
#[derive(Clone, Debug, PartialEq)]
struct TimeStampedPathCopy {
    /// revision at which the copy information was added
    rev: Revision,
    /// the copy source, (Set to None in case of deletion of the associated
    /// key)
    path: Option<HgPathBuf>,
}

/// maps CopyDestination to Copy Source (+ a "timestamp" for the operation)
type TimeStampedPathCopies = OrdMap<HgPathBuf, TimeStampedPathCopy>;

/// hold parent 1, parent 2 and relevant files actions.
pub type RevInfo<'a> = (Revision, Revision, ChangedFiles<'a>);
28
28
/// represent the files affected by a changeset
///
/// This hold a subset of mercurial.metadata.ChangingFiles as we do not need
/// all the data categories tracked by it.
pub struct ChangedFiles<'a> {
    // Number of entries recorded in `index`.
    nb_items: u32,
    // Fixed-size index entries (flag byte + two big-endian u32 fields per
    // entry, see ENTRY_SIZE and friends on the impl).
    index: &'a [u8],
    // Concatenated filename bytes, sliced via the offsets stored in `index`.
    data: &'a [u8],
}
40
40
/// Represent active changes that affect the copy tracing.
enum Action<'a> {
    /// The parent → children edge is removing a file
    ///
    /// (actually, this could be the edge from the other parent, but it does
    /// not matter)
    Removed(&'a HgPath),
    /// The parent → children edge introduces copy information between (dest,
    /// source)
    Copied(&'a HgPath, &'a HgPath),
}
52
52
/// This express the possible "special" case we can get in a merge
///
/// See mercurial/metadata.py for details on these values.
#[derive(PartialEq)]
enum MergeCase {
    /// Merged: file had history on both side that needed to be merged
    Merged,
    /// Salvaged: file was candidate for deletion, but survived the merge
    Salvaged,
    /// Normal: Not one of the two cases above
    Normal,
}
65
65
/// A raw index entry: (flags, filename, copy_source).
type FileChange<'a> = (u8, &'a HgPath, &'a HgPath);

const EMPTY: &[u8] = b"";
// Flag-byte bit masks and values; presumably mirroring the encoding written
// by mercurial/metadata.py — confirm against that module.
const COPY_MASK: u8 = 3;
const P1_COPY: u8 = 2;
const P2_COPY: u8 = 3;
const ACTION_MASK: u8 = 28;
const REMOVED: u8 = 12;
const MERGED: u8 = 8;
const SALVAGED: u8 = 16;
76
76
impl<'a> ChangedFiles<'a> {
    // On-disk layout: a 4-byte big-endian item count, then `nb_items`
    // fixed-size index entries, then the concatenated filename data.
    const INDEX_START: usize = 4;
    // One entry = 1 flag byte + u32 filename-end offset + u32 copy index.
    const ENTRY_SIZE: u32 = 9;
    const FILENAME_START: u32 = 1;
    const COPY_SOURCE_START: u32 = 5;

    /// Parse `data` and validate that it is large enough to hold the
    /// declared number of index entries and the filename data they point to.
    ///
    /// Panics when `data` is truncated.
    pub fn new(data: &'a [u8]) -> Self {
        assert!(
            data.len() >= 4,
            "data size ({}) is too small to contain the header (4)",
            data.len()
        );
        let nb_items_raw: [u8; 4] = (&data[0..=3])
            .try_into()
            .expect("failed to turn 4 bytes into 4 bytes");
        let nb_items = u32::from_be_bytes(nb_items_raw);

        let index_size = (nb_items * Self::ENTRY_SIZE) as usize;
        let index_end = Self::INDEX_START + index_size;

        assert!(
            data.len() >= index_end,
            "data size ({}) is too small to fit the index_data ({})",
            data.len(),
            index_end
        );

        let ret = ChangedFiles {
            nb_items,
            index: &data[Self::INDEX_START..index_end],
            data: &data[index_end..],
        };
        // The last entry's filename-end offset is the largest data offset
        // referenced by the index; check it is in bounds.
        // NOTE(review): if `data` declares zero items, `nb_items - 1`
        // underflows here — callers appear to use `new_empty` for the empty
        // case, but confirm this invariant.
        let max_data = ret.filename_end(nb_items - 1) as usize;
        assert!(
            ret.data.len() >= max_data,
            "data size ({}) is too small to fit all data ({})",
            data.len(),
            index_end + max_data
        );
        ret
    }

    /// Build an instance describing no file change at all.
    pub fn new_empty() -> Self {
        ChangedFiles {
            nb_items: 0,
            index: EMPTY,
            data: EMPTY,
        }
    }

    /// internal function to return an individual entry at a given index
    fn entry(&'a self, idx: u32) -> FileChange<'a> {
        if idx >= self.nb_items {
            panic!(
                "index for entry is higher that the number of file {} >= {}",
                idx, self.nb_items
            )
        }
        let flags = self.flags(idx);
        let filename = self.filename(idx);
        // The copy-source field of an entry is itself an index into the
        // filename table.
        let copy_idx = self.copy_idx(idx);
        let copy_source = self.filename(copy_idx);
        (flags, filename, copy_source)
    }

    /// internal function to return the filename of the entry at a given index
    fn filename(&self, idx: u32) -> &HgPath {
        let filename_start;
        if idx == 0 {
            filename_start = 0;
        } else {
            // Filenames are stored back to back: this entry starts where the
            // previous one ends.
            filename_start = self.filename_end(idx - 1)
        }
        let filename_end = self.filename_end(idx);
        let filename_start = filename_start as usize;
        let filename_end = filename_end as usize;
        HgPath::new(&self.data[filename_start..filename_end])
    }

    /// internal function to return the flag field of the entry at a given
    /// index
    fn flags(&self, idx: u32) -> u8 {
        let idx = idx as usize;
        self.index[idx * (Self::ENTRY_SIZE as usize)]
    }

    /// internal function to return the end of a filename part at a given index
    fn filename_end(&self, idx: u32) -> u32 {
        let start = (idx * Self::ENTRY_SIZE) + Self::FILENAME_START;
        let end = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
        let start = start as usize;
        let end = end as usize;
        let raw = (&self.index[start..end])
            .try_into()
            .expect("failed to turn 4 bytes into 4 bytes");
        u32::from_be_bytes(raw)
    }

    /// internal function to return index of the copy source of the entry at a
    /// given index
    fn copy_idx(&self, idx: u32) -> u32 {
        let start = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
        let end = (idx + 1) * Self::ENTRY_SIZE;
        let start = start as usize;
        let end = end as usize;
        let raw = (&self.index[start..end])
            .try_into()
            .expect("failed to turn 4 bytes into 4 bytes");
        u32::from_be_bytes(raw)
    }

    /// Return an iterator over all the `Action` in this instance.
    fn iter_actions(&self, parent: Parent) -> ActionsIterator {
        ActionsIterator {
            changes: &self,
            parent: parent,
            current: 0,
        }
    }

    /// return the MergeCase value associated with a filename
    fn get_merge_case(&self, path: &HgPath) -> MergeCase {
        if self.nb_items == 0 {
            return MergeCase::Normal;
        }
        // Binary search over the index entries.
        // NOTE(review): `Ordering::Less` advancing `low_part` implies the
        // entries are sorted in *descending* filename order — confirm
        // against the writer in mercurial/metadata.py.
        let mut low_part = 0;
        let mut high_part = self.nb_items;

        while low_part < high_part {
            let cursor = (low_part + high_part - 1) / 2;
            let (flags, filename, _source) = self.entry(cursor);
            match path.cmp(filename) {
                Ordering::Less => low_part = cursor + 1,
                Ordering::Greater => high_part = cursor,
                Ordering::Equal => {
                    return match flags & ACTION_MASK {
                        MERGED => MergeCase::Merged,
                        SALVAGED => MergeCase::Salvaged,
                        _ => MergeCase::Normal,
                    };
                }
            }
        }
        // Filename not present in this changeset's entries.
        MergeCase::Normal
    }
}
223
223
/// A struct responsible for answering "is X ancestors of Y" quickly
///
/// The structure will delegate ancestors call to a callback, and cache the
/// result.
#[derive(Debug)]
struct AncestorOracle<'a, A: Fn(Revision, Revision) -> bool> {
    // The callback actually performing the ancestry check.
    inner: &'a A,
    // Memoized (ancestor, descendant) → answer pairs.
    pairs: HashMap<(Revision, Revision), bool>,
}
233
233
234 impl<'a, A: Fn(Revision, Revision) -> bool> AncestorOracle<'a, A> {
234 impl<'a, A: Fn(Revision, Revision) -> bool> AncestorOracle<'a, A> {
235 fn new(func: &'a A) -> Self {
235 fn new(func: &'a A) -> Self {
236 Self {
236 Self {
237 inner: func,
237 inner: func,
238 pairs: HashMap::default(),
238 pairs: HashMap::default(),
239 }
239 }
240 }
240 }
241
241
242 /// returns `true` if `anc` is an ancestors of `desc`, `false` otherwise
242 /// returns `true` if `anc` is an ancestors of `desc`, `false` otherwise
243 fn is_ancestor(&mut self, anc: Revision, desc: Revision) -> bool {
243 fn is_ancestor(&mut self, anc: Revision, desc: Revision) -> bool {
244 if anc > desc {
244 if anc > desc {
245 false
245 false
246 } else if anc == desc {
246 } else if anc == desc {
247 true
247 true
248 } else {
248 } else {
249 if let Some(b) = self.pairs.get(&(anc, desc)) {
249 if let Some(b) = self.pairs.get(&(anc, desc)) {
250 *b
250 *b
251 } else {
251 } else {
252 let b = (self.inner)(anc, desc);
252 let b = (self.inner)(anc, desc);
253 self.pairs.insert((anc, desc), b);
253 self.pairs.insert((anc, desc), b);
254 b
254 b
255 }
255 }
256 }
256 }
257 }
257 }
258 }
258 }
259
259
/// Iterator over the `Action`s of a `ChangedFiles`, filtered for one parent
/// edge.
struct ActionsIterator<'a> {
    changes: &'a ChangedFiles<'a>,
    // Which parent edge is being walked; selects the copy flag to match.
    parent: Parent,
    // Index of the next entry to examine.
    current: u32,
}
265
265
266 impl<'a> Iterator for ActionsIterator<'a> {
266 impl<'a> Iterator for ActionsIterator<'a> {
267 type Item = Action<'a>;
267 type Item = Action<'a>;
268
268
269 fn next(&mut self) -> Option<Action<'a>> {
269 fn next(&mut self) -> Option<Action<'a>> {
270 let copy_flag = match self.parent {
270 let copy_flag = match self.parent {
271 Parent::FirstParent => P1_COPY,
271 Parent::FirstParent => P1_COPY,
272 Parent::SecondParent => P2_COPY,
272 Parent::SecondParent => P2_COPY,
273 };
273 };
274 while self.current < self.changes.nb_items {
274 while self.current < self.changes.nb_items {
275 let (flags, file, source) = self.changes.entry(self.current);
275 let (flags, file, source) = self.changes.entry(self.current);
276 self.current += 1;
276 self.current += 1;
277 if (flags & ACTION_MASK) == REMOVED {
277 if (flags & ACTION_MASK) == REMOVED {
278 return Some(Action::Removed(file));
278 return Some(Action::Removed(file));
279 }
279 }
280 let copy = flags & COPY_MASK;
280 let copy = flags & COPY_MASK;
281 if copy == copy_flag {
281 if copy == copy_flag {
282 return Some(Action::Copied(file, source));
282 return Some(Action::Copied(file, source));
283 }
283 }
284 }
284 }
285 return None;
285 return None;
286 }
286 }
287 }
287 }
288
288
/// A small struct whose purpose is to ensure lifetime of bytes referenced in
/// ChangedFiles
///
/// It is passed to the RevInfoMaker callback who can assign any necessary
/// content to the `data` attribute. The copy tracing code is responsible for
/// keeping the DataHolder alive at least as long as the ChangedFiles object.
pub struct DataHolder<D> {
    /// RevInfoMaker callback should assign data referenced by the
    /// ChangedFiles struct it return to this attribute. The DataHolder
    /// lifetime will be at least as long as the ChangedFiles one.
    pub data: Option<D>,
}

/// Callback producing the `RevInfo` for a revision, borrowing from the
/// `DataHolder` it is handed.
pub type RevInfoMaker<'a, D> =
    Box<dyn for<'r> Fn(Revision, &'r mut DataHolder<D>) -> RevInfo<'r> + 'a>;
304
304
/// enum used to carry information about the parent → child currently
/// processed
#[derive(Copy, Clone, Debug)]
enum Parent {
    /// The `p1(x) → x` edge
    FirstParent,
    /// The `p2(x) → x` edge
    SecondParent,
}
313
313
/// Same as mercurial.copies._combine_changeset_copies, but in Rust.
///
/// Arguments are:
///
/// revs: all revisions to be considered
/// children: a {parent → [children]} mapping
/// target_rev: the final revision we are combining copies to
/// rev_info(rev): callback to get revision information:
///   * first parent
///   * second parent
///   * ChangedFiles
/// isancestors(low_rev, high_rev): callback to check if a revision is an
///                                 ancestor of another
pub fn combine_changeset_copies<A: Fn(Revision, Revision) -> bool, D>(
    revs: Vec<Revision>,
    children: HashMap<Revision, Vec<Revision>>,
    target_rev: Revision,
    rev_info: RevInfoMaker<D>,
    is_ancestor: &A,
) -> PathCopies {
    let mut all_copies = HashMap::new();
    let mut oracle = AncestorOracle::new(is_ancestor);

    for rev in revs {
        // Retrieve data computed in a previous iteration
        let copies = all_copies.remove(&rev);
        let copies = match copies {
            Some(c) => c,
            None => TimeStampedPathCopies::default(), // root of the walked set
        };

        let current_children = match children.get(&rev) {
            Some(c) => c,
            None => panic!("inconsistent `revs` and `children`"),
        };

        for child in current_children {
            // We will chain the copies information accumulated for `rev` with
            // the individual copies information for each of its children.
            // Creating a new PathCopies for each `rev` → `children` vertex.
            let mut d: DataHolder<D> = DataHolder { data: None };
            let (p1, p2, changes) = rev_info(*child, &mut d);

            // Identify which parent edge of `child` we are walking; `rev`
            // must be one of its two parents.
            let parent = if rev == p1 {
                Parent::FirstParent
            } else {
                assert_eq!(rev, p2);
                Parent::SecondParent
            };
            let new_copies =
                add_from_changes(&copies, &changes, parent, *child);

            // Merge has two parents needs to combines their copy information.
            //
            // If the vertex from the other parent was already processed, we
            // will have a value for the child ready to be used. We need to
            // grab it and combine it with the one we already
            // computed. If not we can simply store the newly
            // computed data. The processing happening at
            // the time of the second parent will take care of combining the
            // two TimeStampedPathCopies instance.
            match all_copies.remove(child) {
                None => {
                    all_copies.insert(child, new_copies);
                }
                Some(other_copies) => {
                    // "major" is always the p1-side information.
                    let (minor, major) = match parent {
                        Parent::FirstParent => (other_copies, new_copies),
                        Parent::SecondParent => (new_copies, other_copies),
                    };
                    let merged_copies =
                        merge_copies_dict(minor, major, &changes, &mut oracle);
                    all_copies.insert(child, merged_copies);
                }
            };
        }
    }

    // Drop internal information (like the timestamp) and return the final
    // mapping.
    let tt_result = all_copies
        .remove(&target_rev)
        .expect("target revision was not processed");
    let mut result = PathCopies::default();
    for (dest, tt_source) in tt_result {
        // Entries whose path is None record deletions; they are dropped
        // from the final mapping.
        if let Some(path) = tt_source.path {
            result.insert(dest, path);
        }
    }
    result
}
405
405
406 /// Combine ChangedFiles with some existing PathCopies information and return
406 /// Combine ChangedFiles with some existing PathCopies information and return
407 /// the result
407 /// the result
408 fn add_from_changes(
408 fn add_from_changes(
409 base_copies: &TimeStampedPathCopies,
409 base_copies: &TimeStampedPathCopies,
410 changes: &ChangedFiles,
410 changes: &ChangedFiles,
411 parent: Parent,
411 parent: Parent,
412 current_rev: Revision,
412 current_rev: Revision,
413 ) -> TimeStampedPathCopies {
413 ) -> TimeStampedPathCopies {
414 let mut copies = base_copies.clone();
414 let mut copies = base_copies.clone();
415 for action in changes.iter_actions(parent) {
415 for action in changes.iter_actions(parent) {
416 match action {
416 match action {
417 Action::Copied(dest, source) => {
417 Action::Copied(dest, source) => {
418 let entry;
418 let entry;
419 if let Some(v) = base_copies.get(source) {
419 if let Some(v) = base_copies.get(source) {
420 entry = match &v.path {
420 entry = match &v.path {
421 Some(path) => Some((*(path)).to_owned()),
421 Some(path) => Some((*(path)).to_owned()),
422 None => Some(source.to_owned()),
422 None => Some(source.to_owned()),
423 }
423 }
424 } else {
424 } else {
425 entry = Some(source.to_owned());
425 entry = Some(source.to_owned());
426 }
426 }
427 // Each new entry is introduced by the children, we
427 // Each new entry is introduced by the children, we
428 // record this information as we will need it to take
428 // record this information as we will need it to take
429 // the right decision when merging conflicting copy
429 // the right decision when merging conflicting copy
430 // information. See merge_copies_dict for details.
430 // information. See merge_copies_dict for details.
431 let ttpc = TimeStampedPathCopy {
431 let ttpc = TimeStampedPathCopy {
432 rev: current_rev,
432 rev: current_rev,
433 path: entry,
433 path: entry,
434 };
434 };
435 copies.insert(dest.to_owned(), ttpc);
435 copies.insert(dest.to_owned(), ttpc);
436 }
436 }
437 Action::Removed(f) => {
437 Action::Removed(f) => {
438 // We must drop copy information for removed file.
438 // We must drop copy information for removed file.
439 //
439 //
440 // We need to explicitly record them as dropped to
440 // We need to explicitly record them as dropped to
441 // propagate this information when merging two
441 // propagate this information when merging two
442 // TimeStampedPathCopies object.
442 // TimeStampedPathCopies object.
443 if copies.contains_key(f.as_ref()) {
443 if copies.contains_key(f.as_ref()) {
444 let ttpc = TimeStampedPathCopy {
444 let ttpc = TimeStampedPathCopy {
445 rev: current_rev,
445 rev: current_rev,
446 path: None,
446 path: None,
447 };
447 };
448 copies.insert(f.to_owned(), ttpc);
448 copies.insert(f.to_owned(), ttpc);
449 }
449 }
450 }
450 }
451 }
451 }
452 }
452 }
453 copies
453 copies
454 }
454 }
455
455
/// merge two copies-mapping together, minor and major
///
/// In case of conflict, value from "major" will be picked, unless in some
/// cases. See inline documentation for details.
fn merge_copies_dict<A: Fn(Revision, Revision) -> bool>(
    minor: TimeStampedPathCopies,
    major: TimeStampedPathCopies,
    changes: &ChangedFiles,
    oracle: &mut AncestorOracle<A>,
) -> TimeStampedPathCopies {
    if minor.is_empty() {
        major
    } else if major.is_empty() {
        minor
    } else {
        // Entries that must be rewritten on the side we end up keeping;
        // whichever of the two lists is cheaper to apply is used below.
        let mut override_minor = Vec::new();
        let mut override_major = Vec::new();

        let mut to_major = |k: &HgPathBuf, v: &TimeStampedPathCopy| {
            override_major.push((k.clone(), v.clone()))
        };
        let mut to_minor = |k: &HgPathBuf, v: &TimeStampedPathCopy| {
            override_minor.push((k.clone(), v.clone()))
        };

        // The diff function leverage detection of the identical subpart if
        // minor and major has some common ancestors. This make it very
        // fast is most case.
        //
        // In case where the two map are vastly different in size, the current
        // approach is still slowish because the iteration will iterate over
        // all the "exclusive" content of the larger on. This situation can be
        // frequent when the subgraph of revision we are processing has a lot
        // of roots. Each roots adding they own fully new map to the mix (and
        // likely a small map, if the path from the root to the "main path" is
        // small.
        //
        // We could do better by detecting such situation and processing them
        // differently.
        for d in minor.diff(&major) {
            match d {
                // Key only present on one side: that side's value wins.
                DiffItem::Add(k, v) => to_minor(k, v),
                DiffItem::Remove(k, v) => to_major(k, v),
                DiffItem::Update { old, new } => {
                    let (dest, src_major) = new;
                    let (_, src_minor) = old;
                    match compare_value(
                        changes, oracle, dest, src_minor, src_major,
                    ) {
                        MergePick::Major => to_minor(dest, src_major),
                        MergePick::Minor => to_major(dest, src_minor),
                        // If the two entry are identical, no need to do
                        // anything (but diff should not have yield them)
                        MergePick::Any => unreachable!(),
                    }
                }
            };
        }

        // Keep whichever map needs no (or fewer) overrides and apply the
        // smaller update list onto it.
        let updates;
        let mut result;
        if override_major.is_empty() {
            result = major
        } else if override_minor.is_empty() {
            result = minor
        } else {
            if override_minor.len() < override_major.len() {
                updates = override_minor;
                result = minor;
            } else {
                updates = override_major;
                result = major;
            }
            for (k, v) in updates {
                result.insert(k, v);
            }
        }
        result
    }
}
535
536
/// Represent the side that should prevail when merging two
/// `TimeStampedPathCopies` (i.e. when the same copy destination carries
/// conflicting copy-source information on each side of a merge).
//
// Deriving `Debug`/`PartialEq`/`Eq` makes the pick inspectable in
// diagnostics and assertions; `Clone`/`Copy` is free for a fieldless enum.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
enum MergePick {
    /// The "major" (p1) side prevails
    Major,
    /// The "minor" (p2) side prevails
    Minor,
    /// Any side could be used (because they are the same)
    Any,
}
546
547
/// Decide which side prevails when `dest` carries conflicting copy
/// information on the two sides of a merge.
///
/// `changes` describes the files affected by the changeset being
/// processed, `oracle` answers ancestry queries between revisions,
/// `dest` is the copy destination with conflicting entries, and
/// `src_minor`/`src_major` are the competing copy sources recorded for
/// the "minor" (p2) and "major" (p1) sides.
///
/// NOTE(review): the branch order below is significant — the
/// `Salvaged`/`Merged` special cases must be checked before the
/// generic ancestry-based rules.
#[allow(clippy::if_same_then_else)]
fn compare_value<A: Fn(Revision, Revision) -> bool>(
    changes: &ChangedFiles,
    oracle: &mut AncestorOracle<A>,
    dest: &HgPathBuf,
    src_minor: &TimeStampedPathCopy,
    src_major: &TimeStampedPathCopy,
) -> MergePick {
    if src_major.path == src_minor.path {
        // Both sides agree on the copy source; only the revision at
        // which the information was recorded may differ.
        if src_major.rev == src_minor.rev {
            // The two entries are fully identical, so both are valid.
            MergePick::Any
        } else if oracle.is_ancestor(src_major.rev, src_minor.rev) {
            // The minor side recorded the same value more recently.
            MergePick::Minor
        } else {
            MergePick::Major
        }
    } else if src_major.rev == src_minor.rev {
        // We cannot get copy information for both p1 and p2 in the
        // same rev. So this is the same value.
        unreachable!(
            "conflict information from p1 and p2 in the same revision"
        );
    } else {
        let action = changes.get_merge_case(&dest);
        if src_major.path.is_none() && action == MergeCase::Salvaged {
            // If the file is "deleted" in the major side but was
            // salvaged by the merge, we keep the minor side alive
            MergePick::Minor
        } else if src_minor.path.is_none() && action == MergeCase::Salvaged {
            // If the file is "deleted" in the minor side but was
            // salvaged by the merge, unconditionally preserve the
            // major side.
            MergePick::Major
        } else if action == MergeCase::Merged {
            // If the file was actively merged, copy information
            // from each side might conflict. The major side wins
            // such a conflict.
            MergePick::Major
        } else if oracle.is_ancestor(src_major.rev, src_minor.rev) {
            // If the minor side is strictly newer than the major
            // side, it should be kept.
            MergePick::Minor
        } else if src_major.path.is_some() {
            // Without any special case, the "major" value wins
            // over the "minor" one.
            MergePick::Major
        } else if oracle.is_ancestor(src_minor.rev, src_major.rev) {
            // The "major" rev is a direct ancestor of "minor", so
            // any different value should overwrite.
            MergePick::Major
        } else {
            // The major version is None (so the file was deleted on
            // that branch) and that branch is independent (neither
            // minor nor major is an ancestor of the other one).
            // We preserve the new information about the new file.
            MergePick::Minor
        }
    }
}
General Comments 0
You need to be logged in to leave comments. Login now