copies-rust: refactor the "deletion" case...
marmoute
r47322:2076df13 default
@@ -1,904 +1,917
1 use crate::utils::hg_path::HgPath;
1 use crate::utils::hg_path::HgPath;
2 use crate::utils::hg_path::HgPathBuf;
2 use crate::utils::hg_path::HgPathBuf;
3 use crate::Revision;
3 use crate::Revision;
4 use crate::NULL_REVISION;
4 use crate::NULL_REVISION;
5
5
6 use im_rc::ordmap::DiffItem;
6 use im_rc::ordmap::DiffItem;
7 use im_rc::ordmap::Entry;
7 use im_rc::ordmap::Entry;
8 use im_rc::ordmap::OrdMap;
8 use im_rc::ordmap::OrdMap;
9
9
10 use std::cmp::Ordering;
10 use std::cmp::Ordering;
11 use std::collections::HashMap;
11 use std::collections::HashMap;
12 use std::collections::HashSet;
12 use std::collections::HashSet;
13 use std::convert::TryInto;
13 use std::convert::TryInto;
14
14
15 pub type PathCopies = HashMap<HgPathBuf, HgPathBuf>;
15 pub type PathCopies = HashMap<HgPathBuf, HgPathBuf>;
16
16
17 type PathToken = usize;
17 type PathToken = usize;
18
18
19 #[derive(Clone, Debug, PartialEq)]
19 #[derive(Clone, Debug, PartialEq)]
20 struct CopySource {
20 struct CopySource {
21 /// revision at which the copy information was added
21 /// revision at which the copy information was added
22 rev: Revision,
22 rev: Revision,
23 /// the copy source (set to None in case of deletion of the associated
23 /// the copy source (set to None in case of deletion of the associated
24 /// key)
24 /// key)
25 path: Option<PathToken>,
25 path: Option<PathToken>,
26 /// a set of previous `CopySource.rev` values directly or indirectly
26 /// a set of previous `CopySource.rev` values directly or indirectly
27 /// overwritten by this one.
27 /// overwritten by this one.
28 overwritten: HashSet<Revision>,
28 overwritten: HashSet<Revision>,
29 }
29 }
30
30
31 impl CopySource {
31 impl CopySource {
32 /// create a new CopySource
32 /// create a new CopySource
33 ///
33 ///
34 /// Use this when no previous copy source existed.
34 /// Use this when no previous copy source existed.
35 fn new(rev: Revision, path: Option<PathToken>) -> Self {
35 fn new(rev: Revision, path: Option<PathToken>) -> Self {
36 Self {
36 Self {
37 rev,
37 rev,
38 path,
38 path,
39 overwritten: HashSet::new(),
39 overwritten: HashSet::new(),
40 }
40 }
41 }
41 }
42
42
43 /// create a new CopySource from merging two others
43 /// create a new CopySource from merging two others
44 ///
44 ///
45 /// Use this when merging two InternalPathCopies requires active merging of
45 /// Use this when merging two InternalPathCopies requires active merging of
46 /// some entries.
46 /// some entries.
47 fn new_from_merge(rev: Revision, winner: &Self, loser: &Self) -> Self {
47 fn new_from_merge(rev: Revision, winner: &Self, loser: &Self) -> Self {
48 let mut overwritten = HashSet::new();
48 let mut overwritten = HashSet::new();
49 overwritten.extend(winner.overwritten.iter().copied());
49 overwritten.extend(winner.overwritten.iter().copied());
50 overwritten.extend(loser.overwritten.iter().copied());
50 overwritten.extend(loser.overwritten.iter().copied());
51 overwritten.insert(winner.rev);
51 overwritten.insert(winner.rev);
52 overwritten.insert(loser.rev);
52 overwritten.insert(loser.rev);
53 Self {
53 Self {
54 rev,
54 rev,
55 path: winner.path,
55 path: winner.path,
56 overwritten,
56 overwritten,
57 }
57 }
58 }
58 }
59
59
60 /// Update the value of a pre-existing CopySource
60 /// Update the value of a pre-existing CopySource
61 ///
61 ///
62 /// Use this when recording copy information from parent → child edges
62 /// Use this when recording copy information from parent → child edges
63 fn overwrite(&mut self, rev: Revision, path: Option<PathToken>) {
63 fn overwrite(&mut self, rev: Revision, path: Option<PathToken>) {
64 self.overwritten.insert(self.rev);
64 self.overwritten.insert(self.rev);
65 self.rev = rev;
65 self.rev = rev;
66 self.path = path;
66 self.path = path;
67 }
67 }
68
68
69 /// Mark pre-existing copy information as "dropped" by a file deletion
69 /// Mark pre-existing copy information as "dropped" by a file deletion
70 ///
70 ///
71 /// Use this when recording copy information from parent → child edges
71 /// Use this when recording copy information from parent → child edges
72 fn mark_delete(&mut self, rev: Revision) {
72 fn mark_delete(&mut self, rev: Revision) {
73 self.overwritten.insert(self.rev);
73 self.overwritten.insert(self.rev);
74 self.rev = rev;
74 self.rev = rev;
75 self.path = None;
75 self.path = None;
76 }
76 }
77
77
78 fn is_overwritten_by(&self, other: &Self) -> bool {
78 fn is_overwritten_by(&self, other: &Self) -> bool {
79 other.overwritten.contains(&self.rev)
79 other.overwritten.contains(&self.rev)
80 }
80 }
81 }
81 }
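For orientation, here is a minimal sketch of how the `rev`/`overwritten` bookkeeping above behaves. It is not part of this changeset; it assumes a hypothetical #[cfg(test)] module inside this file so the private CopySource type is in scope, and the revision numbers are made up.

#[cfg(test)]
mod copy_source_example {
    use super::*;

    #[test]
    fn overwrite_tracks_previous_revisions() {
        // A copy recorded at revision 2, pointing at path token 0.
        let mut src = CopySource::new(2, Some(0));
        // Revision 5 overwrites it with a new source (token 1); revision 2
        // is remembered in the `overwritten` set.
        src.overwrite(5, Some(1));
        // Revision 7 deletes the destination file: the path is cleared but
        // the set of overwritten revisions keeps growing.
        src.mark_delete(7);
        assert_eq!(src.rev, 7);
        assert_eq!(src.path, None);
        assert!(src.overwritten.contains(&2));
        assert!(src.overwritten.contains(&5));

        // An entry still carrying the revision 2 information is considered
        // overwritten by the one above, but not the other way around.
        let old = CopySource::new(2, Some(0));
        assert!(old.is_overwritten_by(&src));
        assert!(!src.is_overwritten_by(&old));
    }
}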
82
82
83 /// maps CopyDestination to Copy Source (+ a "timestamp" for the operation)
83 /// maps CopyDestination to Copy Source (+ a "timestamp" for the operation)
84 type InternalPathCopies = OrdMap<PathToken, CopySource>;
84 type InternalPathCopies = OrdMap<PathToken, CopySource>;
85
85
86 /// holds parent 1, parent 2 and relevant file actions.
86 /// holds parent 1, parent 2 and relevant file actions.
87 pub type RevInfo<'a> = (Revision, Revision, ChangedFiles<'a>);
87 pub type RevInfo<'a> = (Revision, Revision, ChangedFiles<'a>);
88
88
89 /// represents the files affected by a changeset
89 /// represents the files affected by a changeset
90 ///
90 ///
91 /// This holds a subset of mercurial.metadata.ChangingFiles as we do not need
91 /// This holds a subset of mercurial.metadata.ChangingFiles as we do not need
92 /// all the data categories tracked by it.
92 /// all the data categories tracked by it.
93 /// This holds a subset of mercurial.metadata.ChangingFiles as we do not need
93 /// This holds a subset of mercurial.metadata.ChangingFiles as we do not need
94 /// all the data categories tracked by it.
94 /// all the data categories tracked by it.
95 pub struct ChangedFiles<'a> {
95 pub struct ChangedFiles<'a> {
96 nb_items: u32,
96 nb_items: u32,
97 index: &'a [u8],
97 index: &'a [u8],
98 data: &'a [u8],
98 data: &'a [u8],
99 }
99 }
100
100
101 /// Represent active changes that affect the copy tracing.
101 /// Represent active changes that affect the copy tracing.
102 enum Action<'a> {
102 enum Action<'a> {
103 /// The parent → child edge is removing a file
103 /// The parent → child edge is removing a file
104 ///
104 ///
105 /// (actually, this could be the edge from the other parent, but it does
105 /// (actually, this could be the edge from the other parent, but it does
106 /// not matter)
106 /// not matter)
107 Removed(&'a HgPath),
107 Removed(&'a HgPath),
108 /// The parent → child edge introduces copy information between (dest,
108 /// The parent → child edge introduces copy information between (dest,
109 /// source)
109 /// source)
110 CopiedFromP1(&'a HgPath, &'a HgPath),
110 CopiedFromP1(&'a HgPath, &'a HgPath),
111 CopiedFromP2(&'a HgPath, &'a HgPath),
111 CopiedFromP2(&'a HgPath, &'a HgPath),
112 }
112 }
113
113
114 /// This expresses the possible "special" cases we can get in a merge
114 /// This expresses the possible "special" cases we can get in a merge
115 ///
115 ///
116 /// See mercurial/metadata.py for details on these values.
116 /// See mercurial/metadata.py for details on these values.
117 #[derive(PartialEq)]
117 #[derive(PartialEq)]
118 enum MergeCase {
118 enum MergeCase {
119 /// Merged: file had history on both sides that needed to be merged
119 /// Merged: file had history on both sides that needed to be merged
120 Merged,
120 Merged,
121 /// Salvaged: file was candidate for deletion, but survived the merge
121 /// Salvaged: file was candidate for deletion, but survived the merge
122 Salvaged,
122 Salvaged,
123 /// Normal: Not one of the two cases above
123 /// Normal: Not one of the two cases above
124 Normal,
124 Normal,
125 }
125 }
126
126
127 type FileChange<'a> = (u8, &'a HgPath, &'a HgPath);
127 type FileChange<'a> = (u8, &'a HgPath, &'a HgPath);
128
128
129 const EMPTY: &[u8] = b"";
129 const EMPTY: &[u8] = b"";
130 const COPY_MASK: u8 = 3;
130 const COPY_MASK: u8 = 3;
131 const P1_COPY: u8 = 2;
131 const P1_COPY: u8 = 2;
132 const P2_COPY: u8 = 3;
132 const P2_COPY: u8 = 3;
133 const ACTION_MASK: u8 = 28;
133 const ACTION_MASK: u8 = 28;
134 const REMOVED: u8 = 12;
134 const REMOVED: u8 = 12;
135 const MERGED: u8 = 8;
135 const MERGED: u8 = 8;
136 const SALVAGED: u8 = 16;
136 const SALVAGED: u8 = 16;
137
137
138 impl<'a> ChangedFiles<'a> {
138 impl<'a> ChangedFiles<'a> {
139 const INDEX_START: usize = 4;
139 const INDEX_START: usize = 4;
140 const ENTRY_SIZE: u32 = 9;
140 const ENTRY_SIZE: u32 = 9;
141 const FILENAME_START: u32 = 1;
141 const FILENAME_START: u32 = 1;
142 const COPY_SOURCE_START: u32 = 5;
142 const COPY_SOURCE_START: u32 = 5;
143
143
144 pub fn new(data: &'a [u8]) -> Self {
144 pub fn new(data: &'a [u8]) -> Self {
145 assert!(
145 assert!(
146 data.len() >= 4,
146 data.len() >= 4,
147 "data size ({}) is too small to contain the header (4)",
147 "data size ({}) is too small to contain the header (4)",
148 data.len()
148 data.len()
149 );
149 );
150 let nb_items_raw: [u8; 4] = (&data[0..=3])
150 let nb_items_raw: [u8; 4] = (&data[0..=3])
151 .try_into()
151 .try_into()
152 .expect("failed to turn 4 bytes into 4 bytes");
152 .expect("failed to turn 4 bytes into 4 bytes");
153 let nb_items = u32::from_be_bytes(nb_items_raw);
153 let nb_items = u32::from_be_bytes(nb_items_raw);
154
154
155 let index_size = (nb_items * Self::ENTRY_SIZE) as usize;
155 let index_size = (nb_items * Self::ENTRY_SIZE) as usize;
156 let index_end = Self::INDEX_START + index_size;
156 let index_end = Self::INDEX_START + index_size;
157
157
158 assert!(
158 assert!(
159 data.len() >= index_end,
159 data.len() >= index_end,
160 "data size ({}) is too small to fit the index_data ({})",
160 "data size ({}) is too small to fit the index_data ({})",
161 data.len(),
161 data.len(),
162 index_end
162 index_end
163 );
163 );
164
164
165 let ret = ChangedFiles {
165 let ret = ChangedFiles {
166 nb_items,
166 nb_items,
167 index: &data[Self::INDEX_START..index_end],
167 index: &data[Self::INDEX_START..index_end],
168 data: &data[index_end..],
168 data: &data[index_end..],
169 };
169 };
170 let max_data = ret.filename_end(nb_items - 1) as usize;
170 let max_data = ret.filename_end(nb_items - 1) as usize;
171 assert!(
171 assert!(
172 ret.data.len() >= max_data,
172 ret.data.len() >= max_data,
173 "data size ({}) is too small to fit all data ({})",
173 "data size ({}) is too small to fit all data ({})",
174 data.len(),
174 data.len(),
175 index_end + max_data
175 index_end + max_data
176 );
176 );
177 ret
177 ret
178 }
178 }
179
179
180 pub fn new_empty() -> Self {
180 pub fn new_empty() -> Self {
181 ChangedFiles {
181 ChangedFiles {
182 nb_items: 0,
182 nb_items: 0,
183 index: EMPTY,
183 index: EMPTY,
184 data: EMPTY,
184 data: EMPTY,
185 }
185 }
186 }
186 }
187
187
188 /// internal function to return an individual entry at a given index
188 /// internal function to return an individual entry at a given index
189 fn entry(&'a self, idx: u32) -> FileChange<'a> {
189 fn entry(&'a self, idx: u32) -> FileChange<'a> {
190 if idx >= self.nb_items {
190 if idx >= self.nb_items {
191 panic!(
191 panic!(
192 "index for entry is higher that the number of file {} >= {}",
192 "index for entry is higher that the number of file {} >= {}",
193 idx, self.nb_items
193 idx, self.nb_items
194 )
194 )
195 }
195 }
196 let flags = self.flags(idx);
196 let flags = self.flags(idx);
197 let filename = self.filename(idx);
197 let filename = self.filename(idx);
198 let copy_idx = self.copy_idx(idx);
198 let copy_idx = self.copy_idx(idx);
199 let copy_source = self.filename(copy_idx);
199 let copy_source = self.filename(copy_idx);
200 (flags, filename, copy_source)
200 (flags, filename, copy_source)
201 }
201 }
202
202
203 /// internal function to return the filename of the entry at a given index
203 /// internal function to return the filename of the entry at a given index
204 fn filename(&self, idx: u32) -> &HgPath {
204 fn filename(&self, idx: u32) -> &HgPath {
205 let filename_start;
205 let filename_start;
206 if idx == 0 {
206 if idx == 0 {
207 filename_start = 0;
207 filename_start = 0;
208 } else {
208 } else {
209 filename_start = self.filename_end(idx - 1)
209 filename_start = self.filename_end(idx - 1)
210 }
210 }
211 let filename_end = self.filename_end(idx);
211 let filename_end = self.filename_end(idx);
212 let filename_start = filename_start as usize;
212 let filename_start = filename_start as usize;
213 let filename_end = filename_end as usize;
213 let filename_end = filename_end as usize;
214 HgPath::new(&self.data[filename_start..filename_end])
214 HgPath::new(&self.data[filename_start..filename_end])
215 }
215 }
216
216
217 /// internal function to return the flag field of the entry at a given
217 /// internal function to return the flag field of the entry at a given
218 /// index
218 /// index
219 fn flags(&self, idx: u32) -> u8 {
219 fn flags(&self, idx: u32) -> u8 {
220 let idx = idx as usize;
220 let idx = idx as usize;
221 self.index[idx * (Self::ENTRY_SIZE as usize)]
221 self.index[idx * (Self::ENTRY_SIZE as usize)]
222 }
222 }
223
223
224 /// internal function to return the end of a filename part at a given index
224 /// internal function to return the end of a filename part at a given index
225 fn filename_end(&self, idx: u32) -> u32 {
225 fn filename_end(&self, idx: u32) -> u32 {
226 let start = (idx * Self::ENTRY_SIZE) + Self::FILENAME_START;
226 let start = (idx * Self::ENTRY_SIZE) + Self::FILENAME_START;
227 let end = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
227 let end = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
228 let start = start as usize;
228 let start = start as usize;
229 let end = end as usize;
229 let end = end as usize;
230 let raw = (&self.index[start..end])
230 let raw = (&self.index[start..end])
231 .try_into()
231 .try_into()
232 .expect("failed to turn 4 bytes into 4 bytes");
232 .expect("failed to turn 4 bytes into 4 bytes");
233 u32::from_be_bytes(raw)
233 u32::from_be_bytes(raw)
234 }
234 }
235
235
236 /// internal function to return index of the copy source of the entry at a
236 /// internal function to return index of the copy source of the entry at a
237 /// given index
237 /// given index
238 fn copy_idx(&self, idx: u32) -> u32 {
238 fn copy_idx(&self, idx: u32) -> u32 {
239 let start = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
239 let start = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
240 let end = (idx + 1) * Self::ENTRY_SIZE;
240 let end = (idx + 1) * Self::ENTRY_SIZE;
241 let start = start as usize;
241 let start = start as usize;
242 let end = end as usize;
242 let end = end as usize;
243 let raw = (&self.index[start..end])
243 let raw = (&self.index[start..end])
244 .try_into()
244 .try_into()
245 .expect("failed to turn 4 bytes into 4 bytes");
245 .expect("failed to turn 4 bytes into 4 bytes");
246 u32::from_be_bytes(raw)
246 u32::from_be_bytes(raw)
247 }
247 }
248
248
249 /// Return an iterator over all the `Action` in this instance.
249 /// Return an iterator over all the `Action` in this instance.
250 fn iter_actions(&self) -> ActionsIterator {
250 fn iter_actions(&self) -> ActionsIterator {
251 ActionsIterator {
251 ActionsIterator {
252 changes: &self,
252 changes: &self,
253 current: 0,
253 current: 0,
254 }
254 }
255 }
255 }
256
256
257 /// return the MergeCase value associated with a filename
257 /// return the MergeCase value associated with a filename
258 fn get_merge_case(&self, path: &HgPath) -> MergeCase {
258 fn get_merge_case(&self, path: &HgPath) -> MergeCase {
259 if self.nb_items == 0 {
259 if self.nb_items == 0 {
260 return MergeCase::Normal;
260 return MergeCase::Normal;
261 }
261 }
262 let mut low_part = 0;
262 let mut low_part = 0;
263 let mut high_part = self.nb_items;
263 let mut high_part = self.nb_items;
264
264
265 while low_part < high_part {
265 while low_part < high_part {
266 let cursor = (low_part + high_part - 1) / 2;
266 let cursor = (low_part + high_part - 1) / 2;
267 let (flags, filename, _source) = self.entry(cursor);
267 let (flags, filename, _source) = self.entry(cursor);
268 match path.cmp(filename) {
268 match path.cmp(filename) {
269 Ordering::Less => low_part = cursor + 1,
269 Ordering::Less => low_part = cursor + 1,
270 Ordering::Greater => high_part = cursor,
270 Ordering::Greater => high_part = cursor,
271 Ordering::Equal => {
271 Ordering::Equal => {
272 return match flags & ACTION_MASK {
272 return match flags & ACTION_MASK {
273 MERGED => MergeCase::Merged,
273 MERGED => MergeCase::Merged,
274 SALVAGED => MergeCase::Salvaged,
274 SALVAGED => MergeCase::Salvaged,
275 _ => MergeCase::Normal,
275 _ => MergeCase::Normal,
276 };
276 };
277 }
277 }
278 }
278 }
279 }
279 }
280 MergeCase::Normal
280 MergeCase::Normal
281 }
281 }
282 }
282 }
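The constants and accessors above imply a compact layout: a 4-byte big-endian entry count, then one 9-byte index entry per file (one flags byte combining the COPY_MASK and ACTION_MASK bit fields, a 4-byte big-endian offset of the end of the filename in the data section, and a 4-byte big-endian index of the copy-source entry), followed by all filenames concatenated. The following hand-built buffer is a sketch only, assuming a hypothetical #[cfg(test)] module in this file; the byte values are illustrative.

#[cfg(test)]
mod changed_files_example {
    use super::*;

    #[test]
    fn parse_a_tiny_buffer() {
        // Two files, sorted by name: "a" (recorded as copied from "b" on the
        // p1 side) and "b" (touched, but with no copy information).
        let mut data = vec![0, 0, 0, 2]; // number of entries, big-endian u32
        // entry 0: flags = P1_COPY, filename ends at offset 1 in the data
        // section, copy source is the entry at index 1 ("b")
        data.extend_from_slice(&[P1_COPY, 0, 0, 0, 1, 0, 0, 0, 1]);
        // entry 1: no flags, filename ends at offset 2, copy index unused
        data.extend_from_slice(&[0, 0, 0, 0, 2, 0, 0, 0, 1]);
        // data section: the filenames, concatenated back to back
        data.extend_from_slice(b"ab");

        let changes = ChangedFiles::new(&data);
        let actions: Vec<_> = changes.iter_actions().collect();
        assert_eq!(actions.len(), 1);
        if let Action::CopiedFromP1(dest, source) = &actions[0] {
            assert_eq!(*dest, HgPath::new(b"a"));
            assert_eq!(*source, HgPath::new(b"b"));
        } else {
            panic!("expected a copy from p1");
        }
        // get_merge_case binary-searches the (sorted) filenames.
        assert!(changes.get_merge_case(HgPath::new(b"a")) == MergeCase::Normal);
    }
}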
283
283
284 struct ActionsIterator<'a> {
284 struct ActionsIterator<'a> {
285 changes: &'a ChangedFiles<'a>,
285 changes: &'a ChangedFiles<'a>,
286 current: u32,
286 current: u32,
287 }
287 }
288
288
289 impl<'a> Iterator for ActionsIterator<'a> {
289 impl<'a> Iterator for ActionsIterator<'a> {
290 type Item = Action<'a>;
290 type Item = Action<'a>;
291
291
292 fn next(&mut self) -> Option<Action<'a>> {
292 fn next(&mut self) -> Option<Action<'a>> {
293 while self.current < self.changes.nb_items {
293 while self.current < self.changes.nb_items {
294 let (flags, file, source) = self.changes.entry(self.current);
294 let (flags, file, source) = self.changes.entry(self.current);
295 self.current += 1;
295 self.current += 1;
296 if (flags & ACTION_MASK) == REMOVED {
296 if (flags & ACTION_MASK) == REMOVED {
297 return Some(Action::Removed(file));
297 return Some(Action::Removed(file));
298 }
298 }
299 let copy = flags & COPY_MASK;
299 let copy = flags & COPY_MASK;
300 if copy == P1_COPY {
300 if copy == P1_COPY {
301 return Some(Action::CopiedFromP1(file, source));
301 return Some(Action::CopiedFromP1(file, source));
302 } else if copy == P2_COPY {
302 } else if copy == P2_COPY {
303 return Some(Action::CopiedFromP2(file, source));
303 return Some(Action::CopiedFromP2(file, source));
304 }
304 }
305 }
305 }
306 return None;
306 return None;
307 }
307 }
308 }
308 }
309
309
310 /// A small struct whose purpose is to ensure lifetime of bytes referenced in
310 /// A small struct whose purpose is to ensure lifetime of bytes referenced in
311 /// ChangedFiles
311 /// ChangedFiles
312 ///
312 ///
313 /// It is passed to the RevInfoMaker callback which can assign any necessary
313 /// It is passed to the RevInfoMaker callback which can assign any necessary
314 /// content to the `data` attribute. The copy tracing code is responsible for
314 /// content to the `data` attribute. The copy tracing code is responsible for
315 /// keeping the DataHolder alive at least as long as the ChangedFiles object.
315 /// keeping the DataHolder alive at least as long as the ChangedFiles object.
316 pub struct DataHolder<D> {
316 pub struct DataHolder<D> {
317 /// RevInfoMaker callback should assign data referenced by the
317 /// RevInfoMaker callback should assign data referenced by the
318 /// ChangedFiles struct it returns to this attribute. The DataHolder
318 /// ChangedFiles struct it returns to this attribute. The DataHolder
319 /// lifetime will be at least as long as the ChangedFiles one.
319 /// lifetime will be at least as long as the ChangedFiles one.
320 pub data: Option<D>,
320 pub data: Option<D>,
321 }
321 }
322
322
323 pub type RevInfoMaker<'a, D> =
323 pub type RevInfoMaker<'a, D> =
324 Box<dyn for<'r> Fn(Revision, &'r mut DataHolder<D>) -> RevInfo<'r> + 'a>;
324 Box<dyn for<'r> Fn(Revision, &'r mut DataHolder<D>) -> RevInfo<'r> + 'a>;
325
325
326 /// A small "tokenizer" responsible of turning full HgPath into lighter
326 /// A small "tokenizer" responsible of turning full HgPath into lighter
327 /// PathToken
327 /// PathToken
328 ///
328 ///
329 /// Dealing with small object, like integer is much faster, so HgPath input are
329 /// Dealing with small object, like integer is much faster, so HgPath input are
330 /// turned into integer "PathToken" and converted back in the end.
330 /// turned into integer "PathToken" and converted back in the end.
331 #[derive(Clone, Debug, Default)]
331 #[derive(Clone, Debug, Default)]
332 struct TwoWayPathMap {
332 struct TwoWayPathMap {
333 token: HashMap<HgPathBuf, PathToken>,
333 token: HashMap<HgPathBuf, PathToken>,
334 path: Vec<HgPathBuf>,
334 path: Vec<HgPathBuf>,
335 }
335 }
336
336
337 impl TwoWayPathMap {
337 impl TwoWayPathMap {
338 fn tokenize(&mut self, path: &HgPath) -> PathToken {
338 fn tokenize(&mut self, path: &HgPath) -> PathToken {
339 match self.token.get(path) {
339 match self.token.get(path) {
340 Some(a) => *a,
340 Some(a) => *a,
341 None => {
341 None => {
342 let a = self.token.len();
342 let a = self.token.len();
343 let buf = path.to_owned();
343 let buf = path.to_owned();
344 self.path.push(buf.clone());
344 self.path.push(buf.clone());
345 self.token.insert(buf, a);
345 self.token.insert(buf, a);
346 a
346 a
347 }
347 }
348 }
348 }
349 }
349 }
350
350
351 fn untokenize(&self, token: PathToken) -> &HgPathBuf {
351 fn untokenize(&self, token: PathToken) -> &HgPathBuf {
352 assert!(token < self.path.len(), "Unknown token: {}", token);
352 assert!(token < self.path.len(), "Unknown token: {}", token);
353 &self.path[token]
353 &self.path[token]
354 }
354 }
355 }
355 }
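A small illustration of the tokenizer round trip (again assuming a hypothetical test module in this file): the first time a path is seen it receives the next integer token, repeated lookups return the same token, and untokenize maps it back to the original path.

#[cfg(test)]
mod path_map_example {
    use super::*;

    #[test]
    fn tokens_round_trip() {
        let mut map = TwoWayPathMap::default();
        let a = map.tokenize(HgPath::new(b"dir/file"));
        let b = map.tokenize(HgPath::new(b"other"));
        // Tokenizing the same path again yields the same token.
        assert_eq!(a, map.tokenize(HgPath::new(b"dir/file")));
        assert_ne!(a, b);
        // untokenize gives the original path back.
        assert_eq!(map.untokenize(a).as_bytes(), &b"dir/file"[..]);
    }
}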
356
356
357 /// Same as mercurial.copies._combine_changeset_copies, but in Rust.
357 /// Same as mercurial.copies._combine_changeset_copies, but in Rust.
358 ///
358 ///
359 /// Arguments are:
359 /// Arguments are:
360 ///
360 ///
361 /// revs: all revisions to be considered
361 /// revs: all revisions to be considered
362 /// children_count: a {parent → number of children} mapping
362 /// children_count: a {parent → number of children} mapping
363 /// target_rev: the final revision we are combining copies to
363 /// target_rev: the final revision we are combining copies to
364 /// rev_info(rev): callback to get revision information:
364 /// rev_info(rev): callback to get revision information:
365 /// * first parent
365 /// * first parent
366 /// * second parent
366 /// * second parent
367 /// * ChangedFiles
367 /// * ChangedFiles
370 pub fn combine_changeset_copies<D>(
370 pub fn combine_changeset_copies<D>(
371 revs: Vec<Revision>,
371 revs: Vec<Revision>,
372 mut children_count: HashMap<Revision, usize>,
372 mut children_count: HashMap<Revision, usize>,
373 target_rev: Revision,
373 target_rev: Revision,
374 rev_info: RevInfoMaker<D>,
374 rev_info: RevInfoMaker<D>,
375 ) -> PathCopies {
375 ) -> PathCopies {
376 let mut all_copies = HashMap::new();
376 let mut all_copies = HashMap::new();
377
377
378 let mut path_map = TwoWayPathMap::default();
378 let mut path_map = TwoWayPathMap::default();
379
379
380 for rev in revs {
380 for rev in revs {
381 let mut d: DataHolder<D> = DataHolder { data: None };
381 let mut d: DataHolder<D> = DataHolder { data: None };
382 let (p1, p2, changes) = rev_info(rev, &mut d);
382 let (p1, p2, changes) = rev_info(rev, &mut d);
383
383
384 // We will chain the copies information accumulated for the parent with
384 // We will chain the copies information accumulated for the parent with
385 // the individual copies information of the current revision, creating a
385 // the individual copies information of the current revision, creating a
386 // new TimeStampedPath for each `rev` β†’ `children` vertex.
386 // new TimeStampedPath for each `rev` β†’ `children` vertex.
387 // Retrieve data computed in a previous iteration
387 // Retrieve data computed in a previous iteration
388 let p1_copies = match p1 {
388 let p1_copies = match p1 {
389 NULL_REVISION => None,
389 NULL_REVISION => None,
390 _ => get_and_clean_parent_copies(
390 _ => get_and_clean_parent_copies(
391 &mut all_copies,
391 &mut all_copies,
392 &mut children_count,
392 &mut children_count,
393 p1,
393 p1,
394 ), // will be None if the vertex is not to be traversed
394 ), // will be None if the vertex is not to be traversed
395 };
395 };
396 let p2_copies = match p2 {
396 let p2_copies = match p2 {
397 NULL_REVISION => None,
397 NULL_REVISION => None,
398 _ => get_and_clean_parent_copies(
398 _ => get_and_clean_parent_copies(
399 &mut all_copies,
399 &mut all_copies,
400 &mut children_count,
400 &mut children_count,
401 p2,
401 p2,
402 ), // will be None if the vertex is not to be traversed
402 ), // will be None if the vertex is not to be traversed
403 };
403 };
404 // combine it with data for that revision
404 // combine it with data for that revision
405 let (p1_copies, p2_copies) =
405 let (p1_copies, p2_copies) =
406 chain_changes(&mut path_map, p1_copies, p2_copies, &changes, rev);
406 chain_changes(&mut path_map, p1_copies, p2_copies, &changes, rev);
407 let copies = match (p1_copies, p2_copies) {
407 let copies = match (p1_copies, p2_copies) {
408 (None, None) => None,
408 (None, None) => None,
409 (c, None) => c,
409 (c, None) => c,
410 (None, c) => c,
410 (None, c) => c,
411 (Some(p1_copies), Some(p2_copies)) => Some(merge_copies_dict(
411 (Some(p1_copies), Some(p2_copies)) => Some(merge_copies_dict(
412 &path_map, rev, p2_copies, p1_copies, &changes,
412 &path_map, rev, p2_copies, p1_copies, &changes,
413 )),
413 )),
414 };
414 };
415 if let Some(c) = copies {
415 if let Some(c) = copies {
416 all_copies.insert(rev, c);
416 all_copies.insert(rev, c);
417 }
417 }
418 }
418 }
419
419
420 // Drop internal information (like the timestamp) and return the final
420 // Drop internal information (like the timestamp) and return the final
421 // mapping.
421 // mapping.
422 let tt_result = all_copies
422 let tt_result = all_copies
423 .remove(&target_rev)
423 .remove(&target_rev)
424 .expect("target revision was not processed");
424 .expect("target revision was not processed");
425 let mut result = PathCopies::default();
425 let mut result = PathCopies::default();
426 for (dest, tt_source) in tt_result {
426 for (dest, tt_source) in tt_result {
427 if let Some(path) = tt_source.path {
427 if let Some(path) = tt_source.path {
428 let path_dest = path_map.untokenize(dest).to_owned();
428 let path_dest = path_map.untokenize(dest).to_owned();
429 let path_path = path_map.untokenize(path).to_owned();
429 let path_path = path_map.untokenize(path).to_owned();
430 result.insert(path_dest, path_path);
430 result.insert(path_dest, path_path);
431 }
431 }
432 }
432 }
433 result
433 result
434 }
434 }
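For orientation, a minimal (hypothetical) way to drive this function: a two-revision chain in which neither changeset records any copy metadata, so the returned mapping is empty. The revision numbers, the closure body and the `()` data parameter are assumptions made for this sketch, not how the Python callers actually wire it up.

#[cfg(test)]
mod combine_example {
    use super::*;

    #[test]
    fn empty_chain_yields_no_copies() {
        // Revisions are walked in the given order; revision 1 is the target.
        let revs = vec![0, 1];
        let mut children_count = HashMap::new();
        children_count.insert(0, 1); // revision 0 has one relevant child (rev 1)
        // Neither revision carries metadata, so every ChangedFiles is empty.
        let rev_info: RevInfoMaker<()> = Box::new(|rev, _data| match rev {
            0 => (NULL_REVISION, NULL_REVISION, ChangedFiles::new_empty()),
            _ => (0, NULL_REVISION, ChangedFiles::new_empty()),
        });
        let copies = combine_changeset_copies(revs, children_count, 1, rev_info);
        assert!(copies.is_empty());
    }
}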
435
435
436 /// fetch previously computed information
436 /// fetch previously computed information
437 ///
437 ///
438 /// If no other children are expected to need this information, we drop it from
438 /// If no other children are expected to need this information, we drop it from
439 /// the cache.
439 /// the cache.
440 ///
440 ///
441 /// If parent is not part of the set we are expected to walk, return None.
441 /// If parent is not part of the set we are expected to walk, return None.
442 fn get_and_clean_parent_copies(
442 fn get_and_clean_parent_copies(
443 all_copies: &mut HashMap<Revision, InternalPathCopies>,
443 all_copies: &mut HashMap<Revision, InternalPathCopies>,
444 children_count: &mut HashMap<Revision, usize>,
444 children_count: &mut HashMap<Revision, usize>,
445 parent_rev: Revision,
445 parent_rev: Revision,
446 ) -> Option<InternalPathCopies> {
446 ) -> Option<InternalPathCopies> {
447 let count = children_count.get_mut(&parent_rev)?;
447 let count = children_count.get_mut(&parent_rev)?;
448 *count -= 1;
448 *count -= 1;
449 if *count == 0 {
449 if *count == 0 {
450 match all_copies.remove(&parent_rev) {
450 match all_copies.remove(&parent_rev) {
451 Some(c) => Some(c),
451 Some(c) => Some(c),
452 None => Some(InternalPathCopies::default()),
452 None => Some(InternalPathCopies::default()),
453 }
453 }
454 } else {
454 } else {
455 match all_copies.get(&parent_rev) {
455 match all_copies.get(&parent_rev) {
456 Some(c) => Some(c.clone()),
456 Some(c) => Some(c.clone()),
457 None => Some(InternalPathCopies::default()),
457 None => Some(InternalPathCopies::default()),
458 }
458 }
459 }
459 }
460 }
460 }
461
461
462 /// Combine ChangedFiles with some existing PathCopies information and return
462 /// Combine ChangedFiles with some existing PathCopies information and return
463 /// the result
463 /// the result
464 fn chain_changes(
464 fn chain_changes(
465 path_map: &mut TwoWayPathMap,
465 path_map: &mut TwoWayPathMap,
466 base_p1_copies: Option<InternalPathCopies>,
466 base_p1_copies: Option<InternalPathCopies>,
467 base_p2_copies: Option<InternalPathCopies>,
467 base_p2_copies: Option<InternalPathCopies>,
468 changes: &ChangedFiles,
468 changes: &ChangedFiles,
469 current_rev: Revision,
469 current_rev: Revision,
470 ) -> (Option<InternalPathCopies>, Option<InternalPathCopies>) {
470 ) -> (Option<InternalPathCopies>, Option<InternalPathCopies>) {
471 // Fast path the "nothing to do" case.
471 // Fast path the "nothing to do" case.
472 if let (None, None) = (&base_p1_copies, &base_p2_copies) {
472 if let (None, None) = (&base_p1_copies, &base_p2_copies) {
473 return (None, None);
473 return (None, None);
474 }
474 }
475
475
476 let mut p1_copies = base_p1_copies.clone();
476 let mut p1_copies = base_p1_copies.clone();
477 let mut p2_copies = base_p2_copies.clone();
477 let mut p2_copies = base_p2_copies.clone();
478 for action in changes.iter_actions() {
478 for action in changes.iter_actions() {
479 match action {
479 match action {
480 Action::CopiedFromP1(path_dest, path_source) => {
480 Action::CopiedFromP1(path_dest, path_source) => {
481 match &mut p1_copies {
481 match &mut p1_copies {
482 None => (), // This is not a vertex we should process.
482 None => (), // This is not a vertex we should process.
483 Some(copies) => add_one_copy(
483 Some(copies) => add_one_copy(
484 current_rev,
484 current_rev,
485 path_map,
485 path_map,
486 copies,
486 copies,
487 base_p1_copies.as_ref().unwrap(),
487 base_p1_copies.as_ref().unwrap(),
488 path_dest,
488 path_dest,
489 path_source,
489 path_source,
490 ),
490 ),
491 }
491 }
492 }
492 }
493 Action::CopiedFromP2(path_dest, path_source) => {
493 Action::CopiedFromP2(path_dest, path_source) => {
494 match &mut p2_copies {
494 match &mut p2_copies {
495 None => (), // This is not a vertex we should process.
495 None => (), // This is not a vertex we should process.
496 Some(copies) => add_one_copy(
496 Some(copies) => add_one_copy(
497 current_rev,
497 current_rev,
498 path_map,
498 path_map,
499 copies,
499 copies,
500 base_p2_copies.as_ref().unwrap(),
500 base_p2_copies.as_ref().unwrap(),
501 path_dest,
501 path_dest,
502 path_source,
502 path_source,
503 ),
503 ),
504 }
504 }
505 }
505 }
506 Action::Removed(deleted_path) => {
506 Action::Removed(deleted_path) => {
507 // We must drop copy information for removed files.
507 // We must drop copy information for removed files.
508 //
508 //
509 // We need to explicitly record them as dropped to
509 // We need to explicitly record them as dropped to
510 // propagate this information when merging two
510 // propagate this information when merging two
511 // InternalPathCopies object.
511 // InternalPathCopies object.
512 let deleted = path_map.tokenize(deleted_path);
512 let deleted = path_map.tokenize(deleted_path);
513 match &mut p1_copies {
513
514 None => (),
514 let p1_entry = match &mut p1_copies {
515 Some(copies) => {
515 None => None,
516 copies.entry(deleted).and_modify(|old| {
516 Some(copies) => match copies.entry(deleted) {
517 old.mark_delete(current_rev);
517 Entry::Occupied(e) => Some(e),
518 });
518 Entry::Vacant(_) => None,
519 }
519 },
520 };
521 let p2_entry = match &mut p2_copies {
522 None => None,
523 Some(copies) => match copies.entry(deleted) {
524 Entry::Occupied(e) => Some(e),
525 Entry::Vacant(_) => None,
526 },
520 };
527 };
521 match &mut p2_copies {
528
522 None => (),
529 match (p1_entry, p2_entry) {
523 Some(copies) => {
530 (None, None) => (),
524 copies.entry(deleted).and_modify(|old| {
531 (Some(mut e), None) => {
525 old.mark_delete(current_rev);
532 e.get_mut().mark_delete(current_rev)
526 });
527 }
533 }
528 };
534 (None, Some(mut e)) => {
535 e.get_mut().mark_delete(current_rev)
536 }
537 (Some(mut e1), Some(mut e2)) => {
538 e1.get_mut().mark_delete(current_rev);
539 e2.get_mut().mark_delete(current_rev);
540 }
541 }
529 }
542 }
530 }
543 }
531 }
544 }
532 (p1_copies, p2_copies)
545 (p1_copies, p2_copies)
533 }
546 }
534
547
535 // insert a new piece of copy information into an InternalPathCopies
548 // insert a new piece of copy information into an InternalPathCopies
536 //
549 //
537 // This deals with chaining and overwriting.
550 // This deals with chaining and overwriting.
538 fn add_one_copy(
551 fn add_one_copy(
539 current_rev: Revision,
552 current_rev: Revision,
540 path_map: &mut TwoWayPathMap,
553 path_map: &mut TwoWayPathMap,
541 copies: &mut InternalPathCopies,
554 copies: &mut InternalPathCopies,
542 base_copies: &InternalPathCopies,
555 base_copies: &InternalPathCopies,
543 path_dest: &HgPath,
556 path_dest: &HgPath,
544 path_source: &HgPath,
557 path_source: &HgPath,
545 ) {
558 ) {
546 let dest = path_map.tokenize(path_dest);
559 let dest = path_map.tokenize(path_dest);
547 let source = path_map.tokenize(path_source);
560 let source = path_map.tokenize(path_source);
548 let entry;
561 let entry;
549 if let Some(v) = base_copies.get(&source) {
562 if let Some(v) = base_copies.get(&source) {
550 entry = match &v.path {
563 entry = match &v.path {
551 Some(path) => Some((*(path)).to_owned()),
564 Some(path) => Some((*(path)).to_owned()),
552 None => Some(source.to_owned()),
565 None => Some(source.to_owned()),
553 }
566 }
554 } else {
567 } else {
555 entry = Some(source.to_owned());
568 entry = Some(source.to_owned());
556 }
569 }
557 // Each new entry is introduced by the children; we
570 // Each new entry is introduced by the children; we
558 // record this information as we will need it to make
571 // record this information as we will need it to make
559 // the right decision when merging conflicting copy
572 // the right decision when merging conflicting copy
560 // information. See merge_copies_dict for details.
573 // information. See merge_copies_dict for details.
561 match copies.entry(dest) {
574 match copies.entry(dest) {
562 Entry::Vacant(slot) => {
575 Entry::Vacant(slot) => {
563 let ttpc = CopySource::new(current_rev, entry);
576 let ttpc = CopySource::new(current_rev, entry);
564 slot.insert(ttpc);
577 slot.insert(ttpc);
565 }
578 }
566 Entry::Occupied(mut slot) => {
579 Entry::Occupied(mut slot) => {
567 let ttpc = slot.get_mut();
580 let ttpc = slot.get_mut();
568 ttpc.overwrite(current_rev, entry);
581 ttpc.overwrite(current_rev, entry);
569 }
582 }
570 }
583 }
571 }
584 }
572
585
573 /// merge two copies mappings together, minor and major
586 /// merge two copies mappings together, minor and major
574 ///
587 ///
575 /// In case of conflict, the value from "major" will be picked, except in some
588 /// In case of conflict, the value from "major" will be picked, except in some
576 /// cases. See inline documentation for details.
589 /// cases. See inline documentation for details.
577 fn merge_copies_dict(
590 fn merge_copies_dict(
578 path_map: &TwoWayPathMap,
591 path_map: &TwoWayPathMap,
579 current_merge: Revision,
592 current_merge: Revision,
580 mut minor: InternalPathCopies,
593 mut minor: InternalPathCopies,
581 mut major: InternalPathCopies,
594 mut major: InternalPathCopies,
582 changes: &ChangedFiles,
595 changes: &ChangedFiles,
583 ) -> InternalPathCopies {
596 ) -> InternalPathCopies {
584 // This closure exists as a temporary help while multiple developers are
597 // This closure exists as a temporary help while multiple developers are
585 // actively working on this code. Feel free to re-inline it once this
598 // actively working on this code. Feel free to re-inline it once this
586 // code is more settled.
599 // code is more settled.
587 let cmp_value =
600 let cmp_value =
588 |dest: &PathToken, src_minor: &CopySource, src_major: &CopySource| {
601 |dest: &PathToken, src_minor: &CopySource, src_major: &CopySource| {
589 compare_value(
602 compare_value(
590 path_map,
603 path_map,
591 current_merge,
604 current_merge,
592 changes,
605 changes,
593 dest,
606 dest,
594 src_minor,
607 src_minor,
595 src_major,
608 src_major,
596 )
609 )
597 };
610 };
598 if minor.is_empty() {
611 if minor.is_empty() {
599 major
612 major
600 } else if major.is_empty() {
613 } else if major.is_empty() {
601 minor
614 minor
602 } else if minor.len() * 2 < major.len() {
615 } else if minor.len() * 2 < major.len() {
603 // Let's say we are merging two InternalPathCopies instances A and B.
616 // Let's say we are merging two InternalPathCopies instances A and B.
604 //
617 //
605 // If A contains N items, the merge result will never contain more
618 // If A contains N items, the merge result will never contain more
606 // than N values different from the ones in A.
619 // than N values different from the ones in A.
607 //
620 //
608 // If B contains M items, with M > N, the merge result will always
621 // If B contains M items, with M > N, the merge result will always
609 // contain at least M - N values different from the ones in
622 // contain at least M - N values different from the ones in
610 // A.
623 // A.
611 //
624 //
612 // As a result, if N < (M-N), we know that simply iterating over A will
625 // As a result, if N < (M-N), we know that simply iterating over A will
613 // yield fewer differences than iterating over the difference
626 // yield fewer differences than iterating over the difference
614 // between A and B.
627 // between A and B.
615 //
628 //
616 // This helps performance a lot in cases where a tiny
629 // This helps performance a lot in cases where a tiny
617 // InternalPathCopies is merged with a much larger one.
630 // InternalPathCopies is merged with a much larger one.
618 for (dest, src_minor) in minor {
631 for (dest, src_minor) in minor {
619 let src_major = major.get(&dest);
632 let src_major = major.get(&dest);
620 match src_major {
633 match src_major {
621 None => {
634 None => {
622 major.insert(dest, src_minor);
635 major.insert(dest, src_minor);
623 }
636 }
624 Some(src_major) => {
637 Some(src_major) => {
625 let (pick, overwrite) =
638 let (pick, overwrite) =
626 cmp_value(&dest, &src_minor, src_major);
639 cmp_value(&dest, &src_minor, src_major);
627 if overwrite {
640 if overwrite {
628 let src = match pick {
641 let src = match pick {
629 MergePick::Major => CopySource::new_from_merge(
642 MergePick::Major => CopySource::new_from_merge(
630 current_merge,
643 current_merge,
631 src_major,
644 src_major,
632 &src_minor,
645 &src_minor,
633 ),
646 ),
634 MergePick::Minor => CopySource::new_from_merge(
647 MergePick::Minor => CopySource::new_from_merge(
635 current_merge,
648 current_merge,
636 &src_minor,
649 &src_minor,
637 src_major,
650 src_major,
638 ),
651 ),
639 MergePick::Any => CopySource::new_from_merge(
652 MergePick::Any => CopySource::new_from_merge(
640 current_merge,
653 current_merge,
641 src_major,
654 src_major,
642 &src_minor,
655 &src_minor,
643 ),
656 ),
644 };
657 };
645 major.insert(dest, src);
658 major.insert(dest, src);
646 } else {
659 } else {
647 match pick {
660 match pick {
648 MergePick::Any | MergePick::Major => None,
661 MergePick::Any | MergePick::Major => None,
649 MergePick::Minor => major.insert(dest, src_minor),
662 MergePick::Minor => major.insert(dest, src_minor),
650 };
663 };
651 }
664 }
652 }
665 }
653 };
666 };
654 }
667 }
655 major
668 major
656 } else if major.len() * 2 < minor.len() {
669 } else if major.len() * 2 < minor.len() {
657 // This uses the same rationale as the previous block.
670 // This uses the same rationale as the previous block.
658 // (Check previous block documentation for details.)
671 // (Check previous block documentation for details.)
659 for (dest, src_major) in major {
672 for (dest, src_major) in major {
660 let src_minor = minor.get(&dest);
673 let src_minor = minor.get(&dest);
661 match src_minor {
674 match src_minor {
662 None => {
675 None => {
663 minor.insert(dest, src_major);
676 minor.insert(dest, src_major);
664 }
677 }
665 Some(src_minor) => {
678 Some(src_minor) => {
666 let (pick, overwrite) =
679 let (pick, overwrite) =
667 cmp_value(&dest, src_minor, &src_major);
680 cmp_value(&dest, src_minor, &src_major);
668 if overwrite {
681 if overwrite {
669 let src = match pick {
682 let src = match pick {
670 MergePick::Major => CopySource::new_from_merge(
683 MergePick::Major => CopySource::new_from_merge(
671 current_merge,
684 current_merge,
672 &src_major,
685 &src_major,
673 src_minor,
686 src_minor,
674 ),
687 ),
675 MergePick::Minor => CopySource::new_from_merge(
688 MergePick::Minor => CopySource::new_from_merge(
676 current_merge,
689 current_merge,
677 src_minor,
690 src_minor,
678 &src_major,
691 &src_major,
679 ),
692 ),
680 MergePick::Any => CopySource::new_from_merge(
693 MergePick::Any => CopySource::new_from_merge(
681 current_merge,
694 current_merge,
682 &src_major,
695 &src_major,
683 src_minor,
696 src_minor,
684 ),
697 ),
685 };
698 };
686 minor.insert(dest, src);
699 minor.insert(dest, src);
687 } else {
700 } else {
688 match pick {
701 match pick {
689 MergePick::Any | MergePick::Minor => None,
702 MergePick::Any | MergePick::Minor => None,
690 MergePick::Major => minor.insert(dest, src_major),
703 MergePick::Major => minor.insert(dest, src_major),
691 };
704 };
692 }
705 }
693 }
706 }
694 };
707 };
695 }
708 }
696 minor
709 minor
697 } else {
710 } else {
698 let mut override_minor = Vec::new();
711 let mut override_minor = Vec::new();
699 let mut override_major = Vec::new();
712 let mut override_major = Vec::new();
700
713
701 let mut to_major = |k: &PathToken, v: &CopySource| {
714 let mut to_major = |k: &PathToken, v: &CopySource| {
702 override_major.push((k.clone(), v.clone()))
715 override_major.push((k.clone(), v.clone()))
703 };
716 };
704 let mut to_minor = |k: &PathToken, v: &CopySource| {
717 let mut to_minor = |k: &PathToken, v: &CopySource| {
705 override_minor.push((k.clone(), v.clone()))
718 override_minor.push((k.clone(), v.clone()))
706 };
719 };
707
720
708 // The diff function leverages detection of identical subparts when
721 // The diff function leverages detection of identical subparts when
709 // minor and major have some common ancestors. This makes it very
722 // minor and major have some common ancestors. This makes it very
710 // fast in most cases.
723 // fast in most cases.
711 //
724 //
712 // In cases where the two maps are vastly different in size, the current
725 // In cases where the two maps are vastly different in size, the current
713 // approach is still slowish because the iteration will iterate over
726 // approach is still slowish because the iteration will iterate over
714 // all the "exclusive" content of the larger one. This situation can be
727 // all the "exclusive" content of the larger one. This situation can be
715 // frequent when the subgraph of revisions we are processing has a lot
728 // frequent when the subgraph of revisions we are processing has a lot
716 // of roots, each root adding its own fully new map to the mix (and
729 // of roots, each root adding its own fully new map to the mix (and
717 // likely a small map, if the path from the root to the "main path" is
730 // likely a small map, if the path from the root to the "main path" is
718 // small).
731 // small).
719 //
732 //
720 // We could do better by detecting such situations and processing them
733 // We could do better by detecting such situations and processing them
721 // differently.
734 // differently.
722 for d in minor.diff(&major) {
735 for d in minor.diff(&major) {
723 match d {
736 match d {
724 DiffItem::Add(k, v) => to_minor(k, v),
737 DiffItem::Add(k, v) => to_minor(k, v),
725 DiffItem::Remove(k, v) => to_major(k, v),
738 DiffItem::Remove(k, v) => to_major(k, v),
726 DiffItem::Update { old, new } => {
739 DiffItem::Update { old, new } => {
727 let (dest, src_major) = new;
740 let (dest, src_major) = new;
728 let (_, src_minor) = old;
741 let (_, src_minor) = old;
729 let (pick, overwrite) =
742 let (pick, overwrite) =
730 cmp_value(dest, src_minor, src_major);
743 cmp_value(dest, src_minor, src_major);
731 if overwrite {
744 if overwrite {
732 let src = match pick {
745 let src = match pick {
733 MergePick::Major => CopySource::new_from_merge(
746 MergePick::Major => CopySource::new_from_merge(
734 current_merge,
747 current_merge,
735 src_major,
748 src_major,
736 src_minor,
749 src_minor,
737 ),
750 ),
738 MergePick::Minor => CopySource::new_from_merge(
751 MergePick::Minor => CopySource::new_from_merge(
739 current_merge,
752 current_merge,
740 src_minor,
753 src_minor,
741 src_major,
754 src_major,
742 ),
755 ),
743 MergePick::Any => CopySource::new_from_merge(
756 MergePick::Any => CopySource::new_from_merge(
744 current_merge,
757 current_merge,
745 src_major,
758 src_major,
746 src_minor,
759 src_minor,
747 ),
760 ),
748 };
761 };
749 to_minor(dest, &src);
762 to_minor(dest, &src);
750 to_major(dest, &src);
763 to_major(dest, &src);
751 } else {
764 } else {
752 match pick {
765 match pick {
753 MergePick::Major => to_minor(dest, src_major),
766 MergePick::Major => to_minor(dest, src_major),
754 MergePick::Minor => to_major(dest, src_minor),
767 MergePick::Minor => to_major(dest, src_minor),
755 // If the two entries are identical, there is no need to do
768 // If the two entries are identical, there is no need to do
756 // anything (but diff should not have yielded them)
769 // anything (but diff should not have yielded them)
757 MergePick::Any => unreachable!(),
770 MergePick::Any => unreachable!(),
758 }
771 }
759 }
772 }
760 }
773 }
761 };
774 };
762 }
775 }
763
776
764 let updates;
777 let updates;
765 let mut result;
778 let mut result;
766 if override_major.is_empty() {
779 if override_major.is_empty() {
767 result = major
780 result = major
768 } else if override_minor.is_empty() {
781 } else if override_minor.is_empty() {
769 result = minor
782 result = minor
770 } else {
783 } else {
771 if override_minor.len() < override_major.len() {
784 if override_minor.len() < override_major.len() {
772 updates = override_minor;
785 updates = override_minor;
773 result = minor;
786 result = minor;
774 } else {
787 } else {
775 updates = override_major;
788 updates = override_major;
776 result = major;
789 result = major;
777 }
790 }
778 for (k, v) in updates {
791 for (k, v) in updates {
779 result.insert(k, v);
792 result.insert(k, v);
780 }
793 }
781 }
794 }
782 result
795 result
783 }
796 }
784 }
797 }
785
798
786 /// represent the side that should prevail when merging two
799 /// represent the side that should prevail when merging two
787 /// InternalPathCopies
800 /// InternalPathCopies
788 enum MergePick {
801 enum MergePick {
789 /// The "major" (p1) side prevails
802 /// The "major" (p1) side prevails
790 Major,
803 Major,
791 /// The "minor" (p2) side prevails
804 /// The "minor" (p2) side prevails
792 Minor,
805 Minor,
793 /// Any side could be used (because they are the same)
806 /// Any side could be used (because they are the same)
794 Any,
807 Any,
795 }
808 }
796
809
797 /// decide which side prevails in case of conflicting values
810 /// decide which side prevails in case of conflicting values
798 #[allow(clippy::if_same_then_else)]
811 #[allow(clippy::if_same_then_else)]
799 fn compare_value(
812 fn compare_value(
800 path_map: &TwoWayPathMap,
813 path_map: &TwoWayPathMap,
801 current_merge: Revision,
814 current_merge: Revision,
802 changes: &ChangedFiles,
815 changes: &ChangedFiles,
803 dest: &PathToken,
816 dest: &PathToken,
804 src_minor: &CopySource,
817 src_minor: &CopySource,
805 src_major: &CopySource,
818 src_major: &CopySource,
806 ) -> (MergePick, bool) {
819 ) -> (MergePick, bool) {
807 if src_major.rev == current_merge {
820 if src_major.rev == current_merge {
808 if src_minor.rev == current_merge {
821 if src_minor.rev == current_merge {
809 if src_major.path.is_none() {
822 if src_major.path.is_none() {
810 // We cannot get different copy information for both p1 and p2
823 // We cannot get different copy information for both p1 and p2
811 // from the same revision. Unless this was a
824 // from the same revision. Unless this was a
812 // deletion.
825 // deletion.
813 //
826 //
814 // However the deletion might come over different data on each
827 // However the deletion might come over different data on each
815 // branch.
828 // branch.
816 let need_over = src_major.overwritten != src_minor.overwritten;
829 let need_over = src_major.overwritten != src_minor.overwritten;
817 (MergePick::Any, need_over)
830 (MergePick::Any, need_over)
818 } else {
831 } else {
819 unreachable!();
832 unreachable!();
820 }
833 }
821 } else {
834 } else {
822 // The last value comes from the current merge, this value -will- win
835 // The last value comes from the current merge, this value -will- win
823 // eventually.
836 // eventually.
824 (MergePick::Major, true)
837 (MergePick::Major, true)
825 }
838 }
826 } else if src_minor.rev == current_merge {
839 } else if src_minor.rev == current_merge {
827 // The last value comes from the current merge, this value -will- win
840 // The last value comes from the current merge, this value -will- win
828 // eventually.
841 // eventually.
829 (MergePick::Minor, true)
842 (MergePick::Minor, true)
830 } else if src_major.path == src_minor.path {
843 } else if src_major.path == src_minor.path {
831 // we have the same value, but from other source;
844 // we have the same value, but from other source;
832 if src_major.rev == src_minor.rev {
845 if src_major.rev == src_minor.rev {
833 // If the two entries are identical, they are both valid
846 // If the two entries are identical, they are both valid
834 debug_assert!(src_major.overwritten == src_minor.overwritten);
847 debug_assert!(src_major.overwritten == src_minor.overwritten);
835 (MergePick::Any, false)
848 (MergePick::Any, false)
836 } else if src_major.is_overwritten_by(src_minor) {
849 } else if src_major.is_overwritten_by(src_minor) {
837 (MergePick::Minor, false)
850 (MergePick::Minor, false)
838 } else if src_minor.is_overwritten_by(src_major) {
851 } else if src_minor.is_overwritten_by(src_major) {
839 (MergePick::Major, false)
852 (MergePick::Major, false)
840 } else {
853 } else {
841 (MergePick::Any, true)
854 (MergePick::Any, true)
842 }
855 }
843 } else if src_major.rev == src_minor.rev {
856 } else if src_major.rev == src_minor.rev {
844 // We cannot get copy information for both p1 and p2 in the
857 // We cannot get copy information for both p1 and p2 in the
845 // same rev. So this is the same value.
858 // same rev. So this is the same value.
846 unreachable!(
859 unreachable!(
847 "conflicting information from p1 and p2 in the same revision"
860 "conflicting information from p1 and p2 in the same revision"
848 );
861 );
849 } else {
862 } else {
850 let dest_path = path_map.untokenize(*dest);
863 let dest_path = path_map.untokenize(*dest);
851 let action = changes.get_merge_case(dest_path);
864 let action = changes.get_merge_case(dest_path);
852 if src_minor.path.is_some()
865 if src_minor.path.is_some()
853 && src_major.path.is_none()
866 && src_major.path.is_none()
854 && action == MergeCase::Salvaged
867 && action == MergeCase::Salvaged
855 {
868 {
856 // If the file is "deleted" in the major side but was
869 // If the file is "deleted" in the major side but was
857 // salvaged by the merge, we keep the minor side alive
870 // salvaged by the merge, we keep the minor side alive
858 (MergePick::Minor, true)
871 (MergePick::Minor, true)
859 } else if src_major.path.is_some()
872 } else if src_major.path.is_some()
860 && src_minor.path.is_none()
873 && src_minor.path.is_none()
861 && action == MergeCase::Salvaged
874 && action == MergeCase::Salvaged
862 {
875 {
863 // If the file is "deleted" in the minor side but was
876 // If the file is "deleted" in the minor side but was
864 // salvaged by the merge, unconditionally preserve the
877 // salvaged by the merge, unconditionally preserve the
865 // major side.
878 // major side.
866 (MergePick::Major, true)
879 (MergePick::Major, true)
867 } else if src_minor.is_overwritten_by(src_major) {
880 } else if src_minor.is_overwritten_by(src_major) {
868 // The information from the minor version are strictly older than
881 // The information from the minor version are strictly older than
869 // the major version
882 // the major version
870 if action == MergeCase::Merged {
883 if action == MergeCase::Merged {
871 // If the file was actively merged, it means some non-copy
884 // If the file was actively merged, it means some non-copy
872 // activity happened on the other branch. It
885 // activity happened on the other branch. It
873 // means the older copy information is still relevant.
886 // means the older copy information is still relevant.
874 //
887 //
875 // The major side wins such conflict.
888 // The major side wins such conflict.
876 (MergePick::Major, true)
889 (MergePick::Major, true)
877 } else {
890 } else {
878 // No activity on the minor branch, pick the newer one.
891 // No activity on the minor branch, pick the newer one.
879 (MergePick::Major, false)
892 (MergePick::Major, false)
880 }
893 }
881 } else if src_major.is_overwritten_by(src_minor) {
894 } else if src_major.is_overwritten_by(src_minor) {
882 if action == MergeCase::Merged {
895 if action == MergeCase::Merged {
883 // If the file was actively merged, it means some non-copy
896 // If the file was actively merged, it means some non-copy
884 // activity happened on the other branch. It
897 // activity happened on the other branch. It
885 // means the older copy information is still relevant.
898 // means the older copy information is still relevant.
886 //
899 //
887 // The major side wins such conflict.
900 // The major side wins such conflict.
888 (MergePick::Major, true)
901 (MergePick::Major, true)
889 } else {
902 } else {
890 // No activity on the minor branch, pick the newer one.
903 // No activity on the minor branch, pick the newer one.
891 (MergePick::Minor, false)
904 (MergePick::Minor, false)
892 }
905 }
893 } else if src_minor.path.is_none() {
906 } else if src_minor.path.is_none() {
894 // the minor side has no relevant information, pick the alive one
907 // the minor side has no relevant information, pick the alive one
895 (MergePick::Major, true)
908 (MergePick::Major, true)
896 } else if src_major.path.is_none() {
909 } else if src_major.path.is_none() {
897 // the major side has no relevant information, pick the alive one
910 // the major side has no relevant information, pick the alive one
898 (MergePick::Minor, true)
911 (MergePick::Minor, true)
899 } else {
912 } else {
900 // by default the major side wins
913 // by default the major side wins
901 (MergePick::Major, true)
914 (MergePick::Major, true)
902 }
915 }
903 }
916 }
904 }
917 }