copies-rust: implement PartialEqual manually...
marmoute
r47324:b0a3ca02 default
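For context: the #[derive(PartialEq)] removed from CopySource (old line 19 in the diff below) would have compared every field of the struct. A rough, illustrative sketch of what that derived comparison amounts to (not the compiler-generated code verbatim):

    impl PartialEq for CopySource {
        fn eq(&self, other: &Self) -> bool {
            // The derived implementation compares all three fields, including
            // the potentially large `overwritten` HashSet.
            self.rev == other.rev
                && self.path == other.path
                && self.overwritten == other.overwritten
        }
    }

The manual implementation added by this change (new lines 96-110) compares only `rev`, relying on the invariant that, for the same destination, the content generated for a given revision is always the same; in debug builds it also asserts that `path` and `overwritten` agree whenever the revisions match.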
@@ -1,932 +1,947 @@
1 use crate::utils::hg_path::HgPath;
1 use crate::utils::hg_path::HgPath;
2 use crate::utils::hg_path::HgPathBuf;
2 use crate::utils::hg_path::HgPathBuf;
3 use crate::Revision;
3 use crate::Revision;
4 use crate::NULL_REVISION;
4 use crate::NULL_REVISION;
5
5
6 use im_rc::ordmap::DiffItem;
6 use im_rc::ordmap::DiffItem;
7 use im_rc::ordmap::Entry;
7 use im_rc::ordmap::Entry;
8 use im_rc::ordmap::OrdMap;
8 use im_rc::ordmap::OrdMap;
9
9
10 use std::cmp::Ordering;
10 use std::cmp::Ordering;
11 use std::collections::HashMap;
11 use std::collections::HashMap;
12 use std::collections::HashSet;
12 use std::collections::HashSet;
13 use std::convert::TryInto;
13 use std::convert::TryInto;
14
14
15 pub type PathCopies = HashMap<HgPathBuf, HgPathBuf>;
15 pub type PathCopies = HashMap<HgPathBuf, HgPathBuf>;
16
16
17 type PathToken = usize;
17 type PathToken = usize;
18
18
19 #[derive(Clone, Debug, PartialEq)]
19 #[derive(Clone, Debug)]
20 struct CopySource {
20 struct CopySource {
21 /// revision at which the copy information was added
21 /// revision at which the copy information was added
22 rev: Revision,
22 rev: Revision,
23 /// the copy source, (Set to None in case of deletion of the associated
23 /// the copy source, (Set to None in case of deletion of the associated
24 /// key)
24 /// key)
25 path: Option<PathToken>,
25 path: Option<PathToken>,
26 /// a set of previous `CopySource.rev` value directly or indirectly
26 /// a set of previous `CopySource.rev` value directly or indirectly
27 /// overwritten by this one.
27 /// overwritten by this one.
28 overwritten: HashSet<Revision>,
28 overwritten: HashSet<Revision>,
29 }
29 }
30
30
31 impl CopySource {
31 impl CopySource {
32 /// create a new CopySource
32 /// create a new CopySource
33 ///
33 ///
34 /// Use this when no previous copy source existed.
34 /// Use this when no previous copy source existed.
35 fn new(rev: Revision, path: Option<PathToken>) -> Self {
35 fn new(rev: Revision, path: Option<PathToken>) -> Self {
36 Self {
36 Self {
37 rev,
37 rev,
38 path,
38 path,
39 overwritten: HashSet::new(),
39 overwritten: HashSet::new(),
40 }
40 }
41 }
41 }
42
42
43 /// create a new CopySource from merging two others
43 /// create a new CopySource from merging two others
44 ///
44 ///
45 /// Use this when merging two InternalPathCopies requires active merging of
45 /// Use this when merging two InternalPathCopies requires active merging of
46 /// some entries.
46 /// some entries.
47 fn new_from_merge(rev: Revision, winner: &Self, loser: &Self) -> Self {
47 fn new_from_merge(rev: Revision, winner: &Self, loser: &Self) -> Self {
48 let mut overwritten = HashSet::new();
48 let mut overwritten = HashSet::new();
49 overwritten.extend(winner.overwritten.iter().copied());
49 overwritten.extend(winner.overwritten.iter().copied());
50 overwritten.extend(loser.overwritten.iter().copied());
50 overwritten.extend(loser.overwritten.iter().copied());
51 overwritten.insert(winner.rev);
51 overwritten.insert(winner.rev);
52 overwritten.insert(loser.rev);
52 overwritten.insert(loser.rev);
53 Self {
53 Self {
54 rev,
54 rev,
55 path: winner.path,
55 path: winner.path,
56 overwritten: overwritten,
56 overwritten: overwritten,
57 }
57 }
58 }
58 }
59
59
60 /// Update the value of a pre-existing CopySource
60 /// Update the value of a pre-existing CopySource
61 ///
61 ///
62 /// Use this when recording copy information from parent → child edges
62 /// Use this when recording copy information from parent → child edges
63 fn overwrite(&mut self, rev: Revision, path: Option<PathToken>) {
63 fn overwrite(&mut self, rev: Revision, path: Option<PathToken>) {
64 self.overwritten.insert(self.rev);
64 self.overwritten.insert(self.rev);
65 self.rev = rev;
65 self.rev = rev;
66 self.path = path;
66 self.path = path;
67 }
67 }
68
68
69 /// Mark pre-existing copy information as "dropped" by a file deletion
69 /// Mark pre-existing copy information as "dropped" by a file deletion
70 ///
70 ///
71 /// Use this when recording copy information from parent → child edges
71 /// Use this when recording copy information from parent → child edges
72 fn mark_delete(&mut self, rev: Revision) {
72 fn mark_delete(&mut self, rev: Revision) {
73 self.overwritten.insert(self.rev);
73 self.overwritten.insert(self.rev);
74 self.rev = rev;
74 self.rev = rev;
75 self.path = None;
75 self.path = None;
76 }
76 }
77
77
78 /// Mark pre-existing copy information as "dropped" by a file deletion
78 /// Mark pre-existing copy information as "dropped" by a file deletion
79 ///
79 ///
80 /// Use this when recording copy information from parent → child edges
80 /// Use this when recording copy information from parent → child edges
81 fn mark_delete_with_pair(&mut self, rev: Revision, other: &Self) {
81 fn mark_delete_with_pair(&mut self, rev: Revision, other: &Self) {
82 self.overwritten.insert(self.rev);
82 self.overwritten.insert(self.rev);
83 if other.rev != rev {
83 if other.rev != rev {
84 self.overwritten.insert(other.rev);
84 self.overwritten.insert(other.rev);
85 }
85 }
86 self.overwritten.extend(other.overwritten.iter().copied());
86 self.overwritten.extend(other.overwritten.iter().copied());
87 self.rev = rev;
87 self.rev = rev;
88 self.path = None;
88 self.path = None;
89 }
89 }
90
90
91 fn is_overwritten_by(&self, other: &Self) -> bool {
91 fn is_overwritten_by(&self, other: &Self) -> bool {
92 other.overwritten.contains(&self.rev)
92 other.overwritten.contains(&self.rev)
93 }
93 }
94 }
94 }
95
95
96 // For the same "dest", content generated for a given revision will always be
97 // the same.
98 impl PartialEq for CopySource {
99 fn eq(&self, other: &Self) -> bool {
100 #[cfg(debug_assertions)]
101 {
102 if self.rev == other.rev {
103 debug_assert!(self.path == other.path);
104 debug_assert!(self.overwritten == other.overwritten);
105 }
106 }
107 self.rev == other.rev
108 }
109 }
110
96 /// maps CopyDestination to Copy Source (+ a "timestamp" for the operation)
111 /// maps CopyDestination to Copy Source (+ a "timestamp" for the operation)
97 type InternalPathCopies = OrdMap<PathToken, CopySource>;
112 type InternalPathCopies = OrdMap<PathToken, CopySource>;
98
113
99 /// hold parent 1, parent 2 and relevant file actions.
114 /// hold parent 1, parent 2 and relevant file actions.
100 pub type RevInfo<'a> = (Revision, Revision, ChangedFiles<'a>);
115 pub type RevInfo<'a> = (Revision, Revision, ChangedFiles<'a>);
101
116
102 /// represent the files affected by a changeset
117 /// represent the files affected by a changeset
103 ///
118 ///
104 /// This hold a subset of mercurial.metadata.ChangingFiles as we do not need
119 /// This hold a subset of mercurial.metadata.ChangingFiles as we do not need
105 /// all the data categories tracked by it.
120 /// all the data categories tracked by it.
106 /// This hold a subset of mercurial.metadata.ChangingFiles as we do not need
121 /// This hold a subset of mercurial.metadata.ChangingFiles as we do not need
107 /// all the data categories tracked by it.
122 /// all the data categories tracked by it.
108 pub struct ChangedFiles<'a> {
123 pub struct ChangedFiles<'a> {
109 nb_items: u32,
124 nb_items: u32,
110 index: &'a [u8],
125 index: &'a [u8],
111 data: &'a [u8],
126 data: &'a [u8],
112 }
127 }
113
128
114 /// Represent active changes that affect the copy tracing.
129 /// Represent active changes that affect the copy tracing.
115 enum Action<'a> {
130 enum Action<'a> {
116 /// The parent → children edge is removing a file
131 /// The parent → children edge is removing a file
117 ///
132 ///
118 /// (actually, this could be the edge from the other parent, but it does
133 /// (actually, this could be the edge from the other parent, but it does
119 /// not matter)
134 /// not matter)
120 Removed(&'a HgPath),
135 Removed(&'a HgPath),
121 /// The parent → children edge introduces copy information between (dest,
136 /// The parent → children edge introduces copy information between (dest,
122 /// source)
137 /// source)
123 CopiedFromP1(&'a HgPath, &'a HgPath),
138 CopiedFromP1(&'a HgPath, &'a HgPath),
124 CopiedFromP2(&'a HgPath, &'a HgPath),
139 CopiedFromP2(&'a HgPath, &'a HgPath),
125 }
140 }
126
141
127 /// This expresses the possible "special" cases we can get in a merge
142 /// This expresses the possible "special" cases we can get in a merge
128 ///
143 ///
129 /// See mercurial/metadata.py for details on these values.
144 /// See mercurial/metadata.py for details on these values.
130 #[derive(PartialEq)]
145 #[derive(PartialEq)]
131 enum MergeCase {
146 enum MergeCase {
132 /// Merged: file had history on both side that needed to be merged
147 /// Merged: file had history on both side that needed to be merged
133 Merged,
148 Merged,
134 /// Salvaged: file was candidate for deletion, but survived the merge
149 /// Salvaged: file was candidate for deletion, but survived the merge
135 Salvaged,
150 Salvaged,
136 /// Normal: Not one of the two cases above
151 /// Normal: Not one of the two cases above
137 Normal,
152 Normal,
138 }
153 }
139
154
140 type FileChange<'a> = (u8, &'a HgPath, &'a HgPath);
155 type FileChange<'a> = (u8, &'a HgPath, &'a HgPath);
141
156
142 const EMPTY: &[u8] = b"";
157 const EMPTY: &[u8] = b"";
143 const COPY_MASK: u8 = 3;
158 const COPY_MASK: u8 = 3;
144 const P1_COPY: u8 = 2;
159 const P1_COPY: u8 = 2;
145 const P2_COPY: u8 = 3;
160 const P2_COPY: u8 = 3;
146 const ACTION_MASK: u8 = 28;
161 const ACTION_MASK: u8 = 28;
147 const REMOVED: u8 = 12;
162 const REMOVED: u8 = 12;
148 const MERGED: u8 = 8;
163 const MERGED: u8 = 8;
149 const SALVAGED: u8 = 16;
164 const SALVAGED: u8 = 16;
150
165
151 impl<'a> ChangedFiles<'a> {
166 impl<'a> ChangedFiles<'a> {
152 const INDEX_START: usize = 4;
167 const INDEX_START: usize = 4;
153 const ENTRY_SIZE: u32 = 9;
168 const ENTRY_SIZE: u32 = 9;
154 const FILENAME_START: u32 = 1;
169 const FILENAME_START: u32 = 1;
155 const COPY_SOURCE_START: u32 = 5;
170 const COPY_SOURCE_START: u32 = 5;
156
171
157 pub fn new(data: &'a [u8]) -> Self {
172 pub fn new(data: &'a [u8]) -> Self {
158 assert!(
173 assert!(
159 data.len() >= 4,
174 data.len() >= 4,
160 "data size ({}) is too small to contain the header (4)",
175 "data size ({}) is too small to contain the header (4)",
161 data.len()
176 data.len()
162 );
177 );
163 let nb_items_raw: [u8; 4] = (&data[0..=3])
178 let nb_items_raw: [u8; 4] = (&data[0..=3])
164 .try_into()
179 .try_into()
165 .expect("failed to turn 4 bytes into 4 bytes");
180 .expect("failed to turn 4 bytes into 4 bytes");
166 let nb_items = u32::from_be_bytes(nb_items_raw);
181 let nb_items = u32::from_be_bytes(nb_items_raw);
167
182
168 let index_size = (nb_items * Self::ENTRY_SIZE) as usize;
183 let index_size = (nb_items * Self::ENTRY_SIZE) as usize;
169 let index_end = Self::INDEX_START + index_size;
184 let index_end = Self::INDEX_START + index_size;
170
185
171 assert!(
186 assert!(
172 data.len() >= index_end,
187 data.len() >= index_end,
173 "data size ({}) is too small to fit the index_data ({})",
188 "data size ({}) is too small to fit the index_data ({})",
174 data.len(),
189 data.len(),
175 index_end
190 index_end
176 );
191 );
177
192
178 let ret = ChangedFiles {
193 let ret = ChangedFiles {
179 nb_items,
194 nb_items,
180 index: &data[Self::INDEX_START..index_end],
195 index: &data[Self::INDEX_START..index_end],
181 data: &data[index_end..],
196 data: &data[index_end..],
182 };
197 };
183 let max_data = ret.filename_end(nb_items - 1) as usize;
198 let max_data = ret.filename_end(nb_items - 1) as usize;
184 assert!(
199 assert!(
185 ret.data.len() >= max_data,
200 ret.data.len() >= max_data,
186 "data size ({}) is too small to fit all data ({})",
201 "data size ({}) is too small to fit all data ({})",
187 data.len(),
202 data.len(),
188 index_end + max_data
203 index_end + max_data
189 );
204 );
190 ret
205 ret
191 }
206 }
192
207
193 pub fn new_empty() -> Self {
208 pub fn new_empty() -> Self {
194 ChangedFiles {
209 ChangedFiles {
195 nb_items: 0,
210 nb_items: 0,
196 index: EMPTY,
211 index: EMPTY,
197 data: EMPTY,
212 data: EMPTY,
198 }
213 }
199 }
214 }
200
215
201 /// internal function to return an individual entry at a given index
216 /// internal function to return an individual entry at a given index
202 fn entry(&'a self, idx: u32) -> FileChange<'a> {
217 fn entry(&'a self, idx: u32) -> FileChange<'a> {
203 if idx >= self.nb_items {
218 if idx >= self.nb_items {
204 panic!(
219 panic!(
205 "index for entry is higher that the number of file {} >= {}",
220 "index for entry is higher that the number of file {} >= {}",
206 idx, self.nb_items
221 idx, self.nb_items
207 )
222 )
208 }
223 }
209 let flags = self.flags(idx);
224 let flags = self.flags(idx);
210 let filename = self.filename(idx);
225 let filename = self.filename(idx);
211 let copy_idx = self.copy_idx(idx);
226 let copy_idx = self.copy_idx(idx);
212 let copy_source = self.filename(copy_idx);
227 let copy_source = self.filename(copy_idx);
213 (flags, filename, copy_source)
228 (flags, filename, copy_source)
214 }
229 }
215
230
216 /// internal function to return the filename of the entry at a given index
231 /// internal function to return the filename of the entry at a given index
217 fn filename(&self, idx: u32) -> &HgPath {
232 fn filename(&self, idx: u32) -> &HgPath {
218 let filename_start;
233 let filename_start;
219 if idx == 0 {
234 if idx == 0 {
220 filename_start = 0;
235 filename_start = 0;
221 } else {
236 } else {
222 filename_start = self.filename_end(idx - 1)
237 filename_start = self.filename_end(idx - 1)
223 }
238 }
224 let filename_end = self.filename_end(idx);
239 let filename_end = self.filename_end(idx);
225 let filename_start = filename_start as usize;
240 let filename_start = filename_start as usize;
226 let filename_end = filename_end as usize;
241 let filename_end = filename_end as usize;
227 HgPath::new(&self.data[filename_start..filename_end])
242 HgPath::new(&self.data[filename_start..filename_end])
228 }
243 }
229
244
230 /// internal function to return the flag field of the entry at a given
245 /// internal function to return the flag field of the entry at a given
231 /// index
246 /// index
232 fn flags(&self, idx: u32) -> u8 {
247 fn flags(&self, idx: u32) -> u8 {
233 let idx = idx as usize;
248 let idx = idx as usize;
234 self.index[idx * (Self::ENTRY_SIZE as usize)]
249 self.index[idx * (Self::ENTRY_SIZE as usize)]
235 }
250 }
236
251
237 /// internal function to return the end of a filename part at a given index
252 /// internal function to return the end of a filename part at a given index
238 fn filename_end(&self, idx: u32) -> u32 {
253 fn filename_end(&self, idx: u32) -> u32 {
239 let start = (idx * Self::ENTRY_SIZE) + Self::FILENAME_START;
254 let start = (idx * Self::ENTRY_SIZE) + Self::FILENAME_START;
240 let end = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
255 let end = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
241 let start = start as usize;
256 let start = start as usize;
242 let end = end as usize;
257 let end = end as usize;
243 let raw = (&self.index[start..end])
258 let raw = (&self.index[start..end])
244 .try_into()
259 .try_into()
245 .expect("failed to turn 4 bytes into 4 bytes");
260 .expect("failed to turn 4 bytes into 4 bytes");
246 u32::from_be_bytes(raw)
261 u32::from_be_bytes(raw)
247 }
262 }
248
263
249 /// internal function to return index of the copy source of the entry at a
264 /// internal function to return index of the copy source of the entry at a
250 /// given index
265 /// given index
251 fn copy_idx(&self, idx: u32) -> u32 {
266 fn copy_idx(&self, idx: u32) -> u32 {
252 let start = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
267 let start = (idx * Self::ENTRY_SIZE) + Self::COPY_SOURCE_START;
253 let end = (idx + 1) * Self::ENTRY_SIZE;
268 let end = (idx + 1) * Self::ENTRY_SIZE;
254 let start = start as usize;
269 let start = start as usize;
255 let end = end as usize;
270 let end = end as usize;
256 let raw = (&self.index[start..end])
271 let raw = (&self.index[start..end])
257 .try_into()
272 .try_into()
258 .expect("failed to turn 4 bytes into 4 bytes");
273 .expect("failed to turn 4 bytes into 4 bytes");
259 u32::from_be_bytes(raw)
274 u32::from_be_bytes(raw)
260 }
275 }
261
276
262 /// Return an iterator over all the `Action` in this instance.
277 /// Return an iterator over all the `Action` in this instance.
263 fn iter_actions(&self) -> ActionsIterator {
278 fn iter_actions(&self) -> ActionsIterator {
264 ActionsIterator {
279 ActionsIterator {
265 changes: &self,
280 changes: &self,
266 current: 0,
281 current: 0,
267 }
282 }
268 }
283 }
269
284
270 /// return the MergeCase value associated with a filename
285 /// return the MergeCase value associated with a filename
271 fn get_merge_case(&self, path: &HgPath) -> MergeCase {
286 fn get_merge_case(&self, path: &HgPath) -> MergeCase {
272 if self.nb_items == 0 {
287 if self.nb_items == 0 {
273 return MergeCase::Normal;
288 return MergeCase::Normal;
274 }
289 }
275 let mut low_part = 0;
290 let mut low_part = 0;
276 let mut high_part = self.nb_items;
291 let mut high_part = self.nb_items;
277
292
278 while low_part < high_part {
293 while low_part < high_part {
279 let cursor = (low_part + high_part - 1) / 2;
294 let cursor = (low_part + high_part - 1) / 2;
280 let (flags, filename, _source) = self.entry(cursor);
295 let (flags, filename, _source) = self.entry(cursor);
281 match path.cmp(filename) {
296 match path.cmp(filename) {
282 Ordering::Less => low_part = cursor + 1,
297 Ordering::Less => low_part = cursor + 1,
283 Ordering::Greater => high_part = cursor,
298 Ordering::Greater => high_part = cursor,
284 Ordering::Equal => {
299 Ordering::Equal => {
285 return match flags & ACTION_MASK {
300 return match flags & ACTION_MASK {
286 MERGED => MergeCase::Merged,
301 MERGED => MergeCase::Merged,
287 SALVAGED => MergeCase::Salvaged,
302 SALVAGED => MergeCase::Salvaged,
288 _ => MergeCase::Normal,
303 _ => MergeCase::Normal,
289 };
304 };
290 }
305 }
291 }
306 }
292 }
307 }
293 MergeCase::Normal
308 MergeCase::Normal
294 }
309 }
295 }
310 }
296
311
297 struct ActionsIterator<'a> {
312 struct ActionsIterator<'a> {
298 changes: &'a ChangedFiles<'a>,
313 changes: &'a ChangedFiles<'a>,
299 current: u32,
314 current: u32,
300 }
315 }
301
316
302 impl<'a> Iterator for ActionsIterator<'a> {
317 impl<'a> Iterator for ActionsIterator<'a> {
303 type Item = Action<'a>;
318 type Item = Action<'a>;
304
319
305 fn next(&mut self) -> Option<Action<'a>> {
320 fn next(&mut self) -> Option<Action<'a>> {
306 while self.current < self.changes.nb_items {
321 while self.current < self.changes.nb_items {
307 let (flags, file, source) = self.changes.entry(self.current);
322 let (flags, file, source) = self.changes.entry(self.current);
308 self.current += 1;
323 self.current += 1;
309 if (flags & ACTION_MASK) == REMOVED {
324 if (flags & ACTION_MASK) == REMOVED {
310 return Some(Action::Removed(file));
325 return Some(Action::Removed(file));
311 }
326 }
312 let copy = flags & COPY_MASK;
327 let copy = flags & COPY_MASK;
313 if copy == P1_COPY {
328 if copy == P1_COPY {
314 return Some(Action::CopiedFromP1(file, source));
329 return Some(Action::CopiedFromP1(file, source));
315 } else if copy == P2_COPY {
330 } else if copy == P2_COPY {
316 return Some(Action::CopiedFromP2(file, source));
331 return Some(Action::CopiedFromP2(file, source));
317 }
332 }
318 }
333 }
319 return None;
334 return None;
320 }
335 }
321 }
336 }
322
337
323 /// A small struct whose purpose is to ensure lifetime of bytes referenced in
338 /// A small struct whose purpose is to ensure lifetime of bytes referenced in
324 /// ChangedFiles
339 /// ChangedFiles
325 ///
340 ///
326 /// It is passed to the RevInfoMaker callback which can assign any necessary
341 /// It is passed to the RevInfoMaker callback which can assign any necessary
327 /// content to the `data` attribute. The copy tracing code is responsible for
342 /// content to the `data` attribute. The copy tracing code is responsible for
328 /// keeping the DataHolder alive at least as long as the ChangedFiles object.
343 /// keeping the DataHolder alive at least as long as the ChangedFiles object.
329 pub struct DataHolder<D> {
344 pub struct DataHolder<D> {
330 /// RevInfoMaker callback should assign data referenced by the
345 /// RevInfoMaker callback should assign data referenced by the
331 /// ChangedFiles struct it return to this attribute. The DataHolder
346 /// ChangedFiles struct it return to this attribute. The DataHolder
332 /// lifetime will be at least as long as the ChangedFiles one.
347 /// lifetime will be at least as long as the ChangedFiles one.
333 pub data: Option<D>,
348 pub data: Option<D>,
334 }
349 }
335
350
336 pub type RevInfoMaker<'a, D> =
351 pub type RevInfoMaker<'a, D> =
337 Box<dyn for<'r> Fn(Revision, &'r mut DataHolder<D>) -> RevInfo<'r> + 'a>;
352 Box<dyn for<'r> Fn(Revision, &'r mut DataHolder<D>) -> RevInfo<'r> + 'a>;
338
353
339 /// A small "tokenizer" responsible for turning full HgPath into lighter
354 /// A small "tokenizer" responsible for turning full HgPath into lighter
340 /// PathToken
355 /// PathToken
341 ///
356 ///
342 /// Dealing with small objects, like integers, is much faster, so HgPath inputs are
357 /// Dealing with small objects, like integers, is much faster, so HgPath inputs are
343 /// turned into integer "PathToken"s and converted back at the end.
358 /// turned into integer "PathToken"s and converted back at the end.
344 #[derive(Clone, Debug, Default)]
359 #[derive(Clone, Debug, Default)]
345 struct TwoWayPathMap {
360 struct TwoWayPathMap {
346 token: HashMap<HgPathBuf, PathToken>,
361 token: HashMap<HgPathBuf, PathToken>,
347 path: Vec<HgPathBuf>,
362 path: Vec<HgPathBuf>,
348 }
363 }
349
364
350 impl TwoWayPathMap {
365 impl TwoWayPathMap {
351 fn tokenize(&mut self, path: &HgPath) -> PathToken {
366 fn tokenize(&mut self, path: &HgPath) -> PathToken {
352 match self.token.get(path) {
367 match self.token.get(path) {
353 Some(a) => *a,
368 Some(a) => *a,
354 None => {
369 None => {
355 let a = self.token.len();
370 let a = self.token.len();
356 let buf = path.to_owned();
371 let buf = path.to_owned();
357 self.path.push(buf.clone());
372 self.path.push(buf.clone());
358 self.token.insert(buf, a);
373 self.token.insert(buf, a);
359 a
374 a
360 }
375 }
361 }
376 }
362 }
377 }
363
378
364 fn untokenize(&self, token: PathToken) -> &HgPathBuf {
379 fn untokenize(&self, token: PathToken) -> &HgPathBuf {
365 assert!(token < self.path.len(), format!("Unknown token: {}", token));
380 assert!(token < self.path.len(), format!("Unknown token: {}", token));
366 &self.path[token]
381 &self.path[token]
367 }
382 }
368 }
383 }
369
384
370 /// Same as mercurial.copies._combine_changeset_copies, but in Rust.
385 /// Same as mercurial.copies._combine_changeset_copies, but in Rust.
371 ///
386 ///
372 /// Arguments are:
387 /// Arguments are:
373 ///
388 ///
374 /// revs: all revisions to be considered
389 /// revs: all revisions to be considered
375 /// children: a {parent → [children]} mapping
390 /// children: a {parent → [children]} mapping
376 /// target_rev: the final revision we are combining copies to
391 /// target_rev: the final revision we are combining copies to
377 /// rev_info(rev): callback to get revision information:
392 /// rev_info(rev): callback to get revision information:
378 /// * first parent
393 /// * first parent
379 /// * second parent
394 /// * second parent
380 /// * ChangedFiles
395 /// * ChangedFiles
381 /// isancestors(low_rev, high_rev): callback to check if a revision is an
396 /// isancestors(low_rev, high_rev): callback to check if a revision is an
382 /// ancestor of another
397 /// ancestor of another
383 pub fn combine_changeset_copies<D>(
398 pub fn combine_changeset_copies<D>(
384 revs: Vec<Revision>,
399 revs: Vec<Revision>,
385 mut children_count: HashMap<Revision, usize>,
400 mut children_count: HashMap<Revision, usize>,
386 target_rev: Revision,
401 target_rev: Revision,
387 rev_info: RevInfoMaker<D>,
402 rev_info: RevInfoMaker<D>,
388 ) -> PathCopies {
403 ) -> PathCopies {
389 let mut all_copies = HashMap::new();
404 let mut all_copies = HashMap::new();
390
405
391 let mut path_map = TwoWayPathMap::default();
406 let mut path_map = TwoWayPathMap::default();
392
407
393 for rev in revs {
408 for rev in revs {
394 let mut d: DataHolder<D> = DataHolder { data: None };
409 let mut d: DataHolder<D> = DataHolder { data: None };
395 let (p1, p2, changes) = rev_info(rev, &mut d);
410 let (p1, p2, changes) = rev_info(rev, &mut d);
396
411
397 // We will chain the copies information accumulated for the parent with
412 // We will chain the copies information accumulated for the parent with
398 // the individual copies information of the current revision. Creating a
413 // the individual copies information of the current revision. Creating a
399 // new TimeStampedPath for each `rev` → `children` vertex.
414 // new TimeStampedPath for each `rev` → `children` vertex.
400 // Retrieve data computed in a previous iteration
415 // Retrieve data computed in a previous iteration
401 let p1_copies = match p1 {
416 let p1_copies = match p1 {
402 NULL_REVISION => None,
417 NULL_REVISION => None,
403 _ => get_and_clean_parent_copies(
418 _ => get_and_clean_parent_copies(
404 &mut all_copies,
419 &mut all_copies,
405 &mut children_count,
420 &mut children_count,
406 p1,
421 p1,
407 ), // will be None if the vertex is not to be traversed
422 ), // will be None if the vertex is not to be traversed
408 };
423 };
409 let p2_copies = match p2 {
424 let p2_copies = match p2 {
410 NULL_REVISION => None,
425 NULL_REVISION => None,
411 _ => get_and_clean_parent_copies(
426 _ => get_and_clean_parent_copies(
412 &mut all_copies,
427 &mut all_copies,
413 &mut children_count,
428 &mut children_count,
414 p2,
429 p2,
415 ), // will be None if the vertex is not to be traversed
430 ), // will be None if the vertex is not to be traversed
416 };
431 };
417 // combine it with data for that revision
432 // combine it with data for that revision
418 let (p1_copies, p2_copies) =
433 let (p1_copies, p2_copies) =
419 chain_changes(&mut path_map, p1_copies, p2_copies, &changes, rev);
434 chain_changes(&mut path_map, p1_copies, p2_copies, &changes, rev);
420 let copies = match (p1_copies, p2_copies) {
435 let copies = match (p1_copies, p2_copies) {
421 (None, None) => None,
436 (None, None) => None,
422 (c, None) => c,
437 (c, None) => c,
423 (None, c) => c,
438 (None, c) => c,
424 (Some(p1_copies), Some(p2_copies)) => Some(merge_copies_dict(
439 (Some(p1_copies), Some(p2_copies)) => Some(merge_copies_dict(
425 &path_map, rev, p2_copies, p1_copies, &changes,
440 &path_map, rev, p2_copies, p1_copies, &changes,
426 )),
441 )),
427 };
442 };
428 if let Some(c) = copies {
443 if let Some(c) = copies {
429 all_copies.insert(rev, c);
444 all_copies.insert(rev, c);
430 }
445 }
431 }
446 }
432
447
433 // Drop internal information (like the timestamp) and return the final
448 // Drop internal information (like the timestamp) and return the final
434 // mapping.
449 // mapping.
435 let tt_result = all_copies
450 let tt_result = all_copies
436 .remove(&target_rev)
451 .remove(&target_rev)
437 .expect("target revision was not processed");
452 .expect("target revision was not processed");
438 let mut result = PathCopies::default();
453 let mut result = PathCopies::default();
439 for (dest, tt_source) in tt_result {
454 for (dest, tt_source) in tt_result {
440 if let Some(path) = tt_source.path {
455 if let Some(path) = tt_source.path {
441 let path_dest = path_map.untokenize(dest).to_owned();
456 let path_dest = path_map.untokenize(dest).to_owned();
442 let path_path = path_map.untokenize(path).to_owned();
457 let path_path = path_map.untokenize(path).to_owned();
443 result.insert(path_dest, path_path);
458 result.insert(path_dest, path_path);
444 }
459 }
445 }
460 }
446 result
461 result
447 }
462 }
448
463
449 /// fetch previously computed information
464 /// fetch previously computed information
450 ///
465 ///
451 /// If no other children are expected to need this information, we drop it from
466 /// If no other children are expected to need this information, we drop it from
452 /// the cache.
467 /// the cache.
453 ///
468 ///
454 /// If parent is not part of the set we are expected to walk, return None.
469 /// If parent is not part of the set we are expected to walk, return None.
455 fn get_and_clean_parent_copies(
470 fn get_and_clean_parent_copies(
456 all_copies: &mut HashMap<Revision, InternalPathCopies>,
471 all_copies: &mut HashMap<Revision, InternalPathCopies>,
457 children_count: &mut HashMap<Revision, usize>,
472 children_count: &mut HashMap<Revision, usize>,
458 parent_rev: Revision,
473 parent_rev: Revision,
459 ) -> Option<InternalPathCopies> {
474 ) -> Option<InternalPathCopies> {
460 let count = children_count.get_mut(&parent_rev)?;
475 let count = children_count.get_mut(&parent_rev)?;
461 *count -= 1;
476 *count -= 1;
462 if *count == 0 {
477 if *count == 0 {
463 match all_copies.remove(&parent_rev) {
478 match all_copies.remove(&parent_rev) {
464 Some(c) => Some(c),
479 Some(c) => Some(c),
465 None => Some(InternalPathCopies::default()),
480 None => Some(InternalPathCopies::default()),
466 }
481 }
467 } else {
482 } else {
468 match all_copies.get(&parent_rev) {
483 match all_copies.get(&parent_rev) {
469 Some(c) => Some(c.clone()),
484 Some(c) => Some(c.clone()),
470 None => Some(InternalPathCopies::default()),
485 None => Some(InternalPathCopies::default()),
471 }
486 }
472 }
487 }
473 }
488 }
474
489
475 /// Combine ChangedFiles with some existing PathCopies information and return
490 /// Combine ChangedFiles with some existing PathCopies information and return
476 /// the result
491 /// the result
477 fn chain_changes(
492 fn chain_changes(
478 path_map: &mut TwoWayPathMap,
493 path_map: &mut TwoWayPathMap,
479 base_p1_copies: Option<InternalPathCopies>,
494 base_p1_copies: Option<InternalPathCopies>,
480 base_p2_copies: Option<InternalPathCopies>,
495 base_p2_copies: Option<InternalPathCopies>,
481 changes: &ChangedFiles,
496 changes: &ChangedFiles,
482 current_rev: Revision,
497 current_rev: Revision,
483 ) -> (Option<InternalPathCopies>, Option<InternalPathCopies>) {
498 ) -> (Option<InternalPathCopies>, Option<InternalPathCopies>) {
484 // Fast path the "nothing to do" case.
499 // Fast path the "nothing to do" case.
485 if let (None, None) = (&base_p1_copies, &base_p2_copies) {
500 if let (None, None) = (&base_p1_copies, &base_p2_copies) {
486 return (None, None);
501 return (None, None);
487 }
502 }
488
503
489 let mut p1_copies = base_p1_copies.clone();
504 let mut p1_copies = base_p1_copies.clone();
490 let mut p2_copies = base_p2_copies.clone();
505 let mut p2_copies = base_p2_copies.clone();
491 for action in changes.iter_actions() {
506 for action in changes.iter_actions() {
492 match action {
507 match action {
493 Action::CopiedFromP1(path_dest, path_source) => {
508 Action::CopiedFromP1(path_dest, path_source) => {
494 match &mut p1_copies {
509 match &mut p1_copies {
495 None => (), // This is not a vertex we should process.
510 None => (), // This is not a vertex we should process.
496 Some(copies) => add_one_copy(
511 Some(copies) => add_one_copy(
497 current_rev,
512 current_rev,
498 path_map,
513 path_map,
499 copies,
514 copies,
500 base_p1_copies.as_ref().unwrap(),
515 base_p1_copies.as_ref().unwrap(),
501 path_dest,
516 path_dest,
502 path_source,
517 path_source,
503 ),
518 ),
504 }
519 }
505 }
520 }
506 Action::CopiedFromP2(path_dest, path_source) => {
521 Action::CopiedFromP2(path_dest, path_source) => {
507 match &mut p2_copies {
522 match &mut p2_copies {
508 None => (), // This is not a vertex we should process.
523 None => (), // This is not a vertex we should process.
509 Some(copies) => add_one_copy(
524 Some(copies) => add_one_copy(
510 current_rev,
525 current_rev,
511 path_map,
526 path_map,
512 copies,
527 copies,
513 base_p2_copies.as_ref().unwrap(),
528 base_p2_copies.as_ref().unwrap(),
514 path_dest,
529 path_dest,
515 path_source,
530 path_source,
516 ),
531 ),
517 }
532 }
518 }
533 }
519 Action::Removed(deleted_path) => {
534 Action::Removed(deleted_path) => {
520 // We must drop copy information for removed file.
535 // We must drop copy information for removed file.
521 //
536 //
522 // We need to explicitly record them as dropped to
537 // We need to explicitly record them as dropped to
523 // propagate this information when merging two
538 // propagate this information when merging two
524 // InternalPathCopies object.
539 // InternalPathCopies object.
525 let deleted = path_map.tokenize(deleted_path);
540 let deleted = path_map.tokenize(deleted_path);
526
541
527 let p1_entry = match &mut p1_copies {
542 let p1_entry = match &mut p1_copies {
528 None => None,
543 None => None,
529 Some(copies) => match copies.entry(deleted) {
544 Some(copies) => match copies.entry(deleted) {
530 Entry::Occupied(e) => Some(e),
545 Entry::Occupied(e) => Some(e),
531 Entry::Vacant(_) => None,
546 Entry::Vacant(_) => None,
532 },
547 },
533 };
548 };
534 let p2_entry = match &mut p2_copies {
549 let p2_entry = match &mut p2_copies {
535 None => None,
550 None => None,
536 Some(copies) => match copies.entry(deleted) {
551 Some(copies) => match copies.entry(deleted) {
537 Entry::Occupied(e) => Some(e),
552 Entry::Occupied(e) => Some(e),
538 Entry::Vacant(_) => None,
553 Entry::Vacant(_) => None,
539 },
554 },
540 };
555 };
541
556
542 match (p1_entry, p2_entry) {
557 match (p1_entry, p2_entry) {
543 (None, None) => (),
558 (None, None) => (),
544 (Some(mut e), None) => {
559 (Some(mut e), None) => {
545 e.get_mut().mark_delete(current_rev)
560 e.get_mut().mark_delete(current_rev)
546 }
561 }
547 (None, Some(mut e)) => {
562 (None, Some(mut e)) => {
548 e.get_mut().mark_delete(current_rev)
563 e.get_mut().mark_delete(current_rev)
549 }
564 }
550 (Some(mut e1), Some(mut e2)) => {
565 (Some(mut e1), Some(mut e2)) => {
551 let cs1 = e1.get_mut();
566 let cs1 = e1.get_mut();
552 let cs2 = e2.get();
567 let cs2 = e2.get();
553 cs1.mark_delete_with_pair(current_rev, &cs2);
568 cs1.mark_delete_with_pair(current_rev, &cs2);
554 e2.insert(cs1.clone());
569 e2.insert(cs1.clone());
555 }
570 }
556 }
571 }
557 }
572 }
558 }
573 }
559 }
574 }
560 (p1_copies, p2_copies)
575 (p1_copies, p2_copies)
561 }
576 }
562
577
563 // insert one new copy information in an InternalPathCopies
578 // insert one new copy information in an InternalPathCopies
564 //
579 //
565 // This deals with chaining and overwriting.
580 // This deals with chaining and overwriting.
566 fn add_one_copy(
581 fn add_one_copy(
567 current_rev: Revision,
582 current_rev: Revision,
568 path_map: &mut TwoWayPathMap,
583 path_map: &mut TwoWayPathMap,
569 copies: &mut InternalPathCopies,
584 copies: &mut InternalPathCopies,
570 base_copies: &InternalPathCopies,
585 base_copies: &InternalPathCopies,
571 path_dest: &HgPath,
586 path_dest: &HgPath,
572 path_source: &HgPath,
587 path_source: &HgPath,
573 ) {
588 ) {
574 let dest = path_map.tokenize(path_dest);
589 let dest = path_map.tokenize(path_dest);
575 let source = path_map.tokenize(path_source);
590 let source = path_map.tokenize(path_source);
576 let entry;
591 let entry;
577 if let Some(v) = base_copies.get(&source) {
592 if let Some(v) = base_copies.get(&source) {
578 entry = match &v.path {
593 entry = match &v.path {
579 Some(path) => Some((*(path)).to_owned()),
594 Some(path) => Some((*(path)).to_owned()),
580 None => Some(source.to_owned()),
595 None => Some(source.to_owned()),
581 }
596 }
582 } else {
597 } else {
583 entry = Some(source.to_owned());
598 entry = Some(source.to_owned());
584 }
599 }
585 // Each new entry is introduced by the children, we
600 // Each new entry is introduced by the children, we
586 // record this information as we will need it to take
601 // record this information as we will need it to take
587 // the right decision when merging conflicting copy
602 // the right decision when merging conflicting copy
588 // information. See merge_copies_dict for details.
603 // information. See merge_copies_dict for details.
589 match copies.entry(dest) {
604 match copies.entry(dest) {
590 Entry::Vacant(slot) => {
605 Entry::Vacant(slot) => {
591 let ttpc = CopySource::new(current_rev, entry);
606 let ttpc = CopySource::new(current_rev, entry);
592 slot.insert(ttpc);
607 slot.insert(ttpc);
593 }
608 }
594 Entry::Occupied(mut slot) => {
609 Entry::Occupied(mut slot) => {
595 let ttpc = slot.get_mut();
610 let ttpc = slot.get_mut();
596 ttpc.overwrite(current_rev, entry);
611 ttpc.overwrite(current_rev, entry);
597 }
612 }
598 }
613 }
599 }
614 }
600
615
601 /// merge two copies-mapping together, minor and major
616 /// merge two copies-mapping together, minor and major
602 ///
617 ///
603 /// In case of conflict, value from "major" will be picked, unless in some
618 /// In case of conflict, value from "major" will be picked, unless in some
604 /// cases. See inline documentation for details.
619 /// cases. See inline documentation for details.
605 fn merge_copies_dict(
620 fn merge_copies_dict(
606 path_map: &TwoWayPathMap,
621 path_map: &TwoWayPathMap,
607 current_merge: Revision,
622 current_merge: Revision,
608 mut minor: InternalPathCopies,
623 mut minor: InternalPathCopies,
609 mut major: InternalPathCopies,
624 mut major: InternalPathCopies,
610 changes: &ChangedFiles,
625 changes: &ChangedFiles,
611 ) -> InternalPathCopies {
626 ) -> InternalPathCopies {
612 // This closure exists as temporary help while multiple developers are
627 // This closure exists as temporary help while multiple developers are
613 // actively working on this code. Feel free to re-inline it once this
628 // actively working on this code. Feel free to re-inline it once this
614 // code is more settled.
629 // code is more settled.
615 let cmp_value =
630 let cmp_value =
616 |dest: &PathToken, src_minor: &CopySource, src_major: &CopySource| {
631 |dest: &PathToken, src_minor: &CopySource, src_major: &CopySource| {
617 compare_value(
632 compare_value(
618 path_map,
633 path_map,
619 current_merge,
634 current_merge,
620 changes,
635 changes,
621 dest,
636 dest,
622 src_minor,
637 src_minor,
623 src_major,
638 src_major,
624 )
639 )
625 };
640 };
626 if minor.is_empty() {
641 if minor.is_empty() {
627 major
642 major
628 } else if major.is_empty() {
643 } else if major.is_empty() {
629 minor
644 minor
630 } else if minor.len() * 2 < major.len() {
645 } else if minor.len() * 2 < major.len() {
631 // Let's say we are merging two InternalPathCopies instances A and B.
646 // Let's say we are merging two InternalPathCopies instances A and B.
632 //
647 //
633 // If A contains N items, the merge result will never contain more
648 // If A contains N items, the merge result will never contain more
634 // than N values different from the ones in A.
649 // than N values different from the ones in A.
635 //
650 //
636 // If B contains M items, with M > N, the merge result will always
651 // If B contains M items, with M > N, the merge result will always
637 // contain at least M - N values different from the ones in
652 // contain at least M - N values different from the ones in
638 // A.
653 // A.
639 //
654 //
640 // As a result, if N < (M-N), we know that simply iterating over A will
655 // As a result, if N < (M-N), we know that simply iterating over A will
641 // yield fewer differences than iterating over the difference
656 // yield fewer differences than iterating over the difference
642 // between A and B.
657 // between A and B.
643 //
658 //
644 // This helps performance a lot in cases where a tiny
659 // This helps performance a lot in cases where a tiny
645 // InternalPathCopies is merged with a much larger one.
660 // InternalPathCopies is merged with a much larger one.
646 for (dest, src_minor) in minor {
661 for (dest, src_minor) in minor {
647 let src_major = major.get(&dest);
662 let src_major = major.get(&dest);
648 match src_major {
663 match src_major {
649 None => {
664 None => {
650 major.insert(dest, src_minor);
665 major.insert(dest, src_minor);
651 }
666 }
652 Some(src_major) => {
667 Some(src_major) => {
653 let (pick, overwrite) =
668 let (pick, overwrite) =
654 cmp_value(&dest, &src_minor, src_major);
669 cmp_value(&dest, &src_minor, src_major);
655 if overwrite {
670 if overwrite {
656 let src = match pick {
671 let src = match pick {
657 MergePick::Major => CopySource::new_from_merge(
672 MergePick::Major => CopySource::new_from_merge(
658 current_merge,
673 current_merge,
659 src_major,
674 src_major,
660 &src_minor,
675 &src_minor,
661 ),
676 ),
662 MergePick::Minor => CopySource::new_from_merge(
677 MergePick::Minor => CopySource::new_from_merge(
663 current_merge,
678 current_merge,
664 &src_minor,
679 &src_minor,
665 src_major,
680 src_major,
666 ),
681 ),
667 MergePick::Any => CopySource::new_from_merge(
682 MergePick::Any => CopySource::new_from_merge(
668 current_merge,
683 current_merge,
669 src_major,
684 src_major,
670 &src_minor,
685 &src_minor,
671 ),
686 ),
672 };
687 };
673 major.insert(dest, src);
688 major.insert(dest, src);
674 } else {
689 } else {
675 match pick {
690 match pick {
676 MergePick::Any | MergePick::Major => None,
691 MergePick::Any | MergePick::Major => None,
677 MergePick::Minor => major.insert(dest, src_minor),
692 MergePick::Minor => major.insert(dest, src_minor),
678 };
693 };
679 }
694 }
680 }
695 }
681 };
696 };
682 }
697 }
683 major
698 major
684 } else if major.len() * 2 < minor.len() {
699 } else if major.len() * 2 < minor.len() {
685 // This uses the same rationale as the previous block.
700 // This uses the same rationale as the previous block.
686 // (Check previous block documentation for details.)
701 // (Check previous block documentation for details.)
687 for (dest, src_major) in major {
702 for (dest, src_major) in major {
688 let src_minor = minor.get(&dest);
703 let src_minor = minor.get(&dest);
689 match src_minor {
704 match src_minor {
690 None => {
705 None => {
691 minor.insert(dest, src_major);
706 minor.insert(dest, src_major);
692 }
707 }
693 Some(src_minor) => {
708 Some(src_minor) => {
694 let (pick, overwrite) =
709 let (pick, overwrite) =
695 cmp_value(&dest, src_minor, &src_major);
710 cmp_value(&dest, src_minor, &src_major);
696 if overwrite {
711 if overwrite {
697 let src = match pick {
712 let src = match pick {
698 MergePick::Major => CopySource::new_from_merge(
713 MergePick::Major => CopySource::new_from_merge(
699 current_merge,
714 current_merge,
700 &src_major,
715 &src_major,
701 src_minor,
716 src_minor,
702 ),
717 ),
703 MergePick::Minor => CopySource::new_from_merge(
718 MergePick::Minor => CopySource::new_from_merge(
704 current_merge,
719 current_merge,
705 src_minor,
720 src_minor,
706 &src_major,
721 &src_major,
707 ),
722 ),
708 MergePick::Any => CopySource::new_from_merge(
723 MergePick::Any => CopySource::new_from_merge(
709 current_merge,
724 current_merge,
710 &src_major,
725 &src_major,
711 src_minor,
726 src_minor,
712 ),
727 ),
713 };
728 };
714 minor.insert(dest, src);
729 minor.insert(dest, src);
715 } else {
730 } else {
716 match pick {
731 match pick {
717 MergePick::Any | MergePick::Minor => None,
732 MergePick::Any | MergePick::Minor => None,
718 MergePick::Major => minor.insert(dest, src_major),
733 MergePick::Major => minor.insert(dest, src_major),
719 };
734 };
720 }
735 }
721 }
736 }
722 };
737 };
723 }
738 }
724 minor
739 minor
725 } else {
740 } else {
726 let mut override_minor = Vec::new();
741 let mut override_minor = Vec::new();
727 let mut override_major = Vec::new();
742 let mut override_major = Vec::new();
728
743
729 let mut to_major = |k: &PathToken, v: &CopySource| {
744 let mut to_major = |k: &PathToken, v: &CopySource| {
730 override_major.push((k.clone(), v.clone()))
745 override_major.push((k.clone(), v.clone()))
731 };
746 };
732 let mut to_minor = |k: &PathToken, v: &CopySource| {
747 let mut to_minor = |k: &PathToken, v: &CopySource| {
733 override_minor.push((k.clone(), v.clone()))
748 override_minor.push((k.clone(), v.clone()))
734 };
749 };
735
750
736 // The diff function leverages detection of identical subparts if
751 // The diff function leverages detection of identical subparts if
737 // minor and major have some common ancestors. This makes it very
752 // minor and major have some common ancestors. This makes it very
738 // fast in most cases.
753 // fast in most cases.
739 //
754 //
740 // In cases where the two maps are vastly different in size, the current
755 // In cases where the two maps are vastly different in size, the current
741 // approach is still slowish because the iteration will iterate over
756 // approach is still slowish because the iteration will iterate over
742 // all the "exclusive" content of the larger one. This situation can be
757 // all the "exclusive" content of the larger one. This situation can be
743 // frequent when the subgraph of revisions we are processing has a lot
758 // frequent when the subgraph of revisions we are processing has a lot
744 // of roots. Each root adds its own fully new map to the mix (and
759 // of roots. Each root adds its own fully new map to the mix (and
745 // likely a small map, if the path from the root to the "main path" is
760 // likely a small map, if the path from the root to the "main path" is
746 // small).
761 // small).
747 //
762 //
748 // We could do better by detecting such situations and processing them
763 // We could do better by detecting such situations and processing them
749 // differently.
764 // differently.
750 for d in minor.diff(&major) {
765 for d in minor.diff(&major) {
751 match d {
766 match d {
752 DiffItem::Add(k, v) => to_minor(k, v),
767 DiffItem::Add(k, v) => to_minor(k, v),
753 DiffItem::Remove(k, v) => to_major(k, v),
768 DiffItem::Remove(k, v) => to_major(k, v),
754 DiffItem::Update { old, new } => {
769 DiffItem::Update { old, new } => {
755 let (dest, src_major) = new;
770 let (dest, src_major) = new;
756 let (_, src_minor) = old;
771 let (_, src_minor) = old;
757 let (pick, overwrite) =
772 let (pick, overwrite) =
758 cmp_value(dest, src_minor, src_major);
773 cmp_value(dest, src_minor, src_major);
759 if overwrite {
774 if overwrite {
760 let src = match pick {
775 let src = match pick {
761 MergePick::Major => CopySource::new_from_merge(
776 MergePick::Major => CopySource::new_from_merge(
762 current_merge,
777 current_merge,
763 src_major,
778 src_major,
764 src_minor,
779 src_minor,
765 ),
780 ),
766 MergePick::Minor => CopySource::new_from_merge(
781 MergePick::Minor => CopySource::new_from_merge(
767 current_merge,
782 current_merge,
768 src_minor,
783 src_minor,
769 src_major,
784 src_major,
770 ),
785 ),
771 MergePick::Any => CopySource::new_from_merge(
786 MergePick::Any => CopySource::new_from_merge(
772 current_merge,
787 current_merge,
773 src_major,
788 src_major,
774 src_minor,
789 src_minor,
775 ),
790 ),
776 };
791 };
777 to_minor(dest, &src);
792 to_minor(dest, &src);
778 to_major(dest, &src);
793 to_major(dest, &src);
779 } else {
794 } else {
780 match pick {
795 match pick {
781 MergePick::Major => to_minor(dest, src_major),
796 MergePick::Major => to_minor(dest, src_major),
782 MergePick::Minor => to_major(dest, src_minor),
797 MergePick::Minor => to_major(dest, src_minor),
783 // If the two entries are identical, no need to do
798 // If the two entries are identical, no need to do
784 // anything (but diff should not have yielded them)
799 // anything (but diff should not have yielded them)
785 MergePick::Any => unreachable!(),
800 MergePick::Any => unreachable!(),
786 }
801 }
787 }
802 }
788 }
803 }
789 };
804 };
790 }
805 }
791
806
792 let updates;
807 let updates;
793 let mut result;
808 let mut result;
794 if override_major.is_empty() {
809 if override_major.is_empty() {
795 result = major
810 result = major
796 } else if override_minor.is_empty() {
811 } else if override_minor.is_empty() {
797 result = minor
812 result = minor
798 } else {
813 } else {
799 if override_minor.len() < override_major.len() {
814 if override_minor.len() < override_major.len() {
800 updates = override_minor;
815 updates = override_minor;
801 result = minor;
816 result = minor;
802 } else {
817 } else {
803 updates = override_major;
818 updates = override_major;
804 result = major;
819 result = major;
805 }
820 }
806 for (k, v) in updates {
821 for (k, v) in updates {
807 result.insert(k, v);
822 result.insert(k, v);
808 }
823 }
809 }
824 }
810 result
825 result
811 }
826 }
812 }
827 }
813
828
814 /// represent the side that should prevail when merging two
829 /// represent the side that should prevail when merging two
815 /// InternalPathCopies
830 /// InternalPathCopies
816 enum MergePick {
831 enum MergePick {
817 /// The "major" (p1) side prevails
832 /// The "major" (p1) side prevails
818 Major,
833 Major,
819 /// The "minor" (p2) side prevails
834 /// The "minor" (p2) side prevails
820 Minor,
835 Minor,
821 /// Any side could be used (because they are the same)
836 /// Any side could be used (because they are the same)
822 Any,
837 Any,
823 }
838 }
824
839
825 /// decide which side prevails in case of conflicting values
840 /// decide which side prevails in case of conflicting values
826 #[allow(clippy::if_same_then_else)]
841 #[allow(clippy::if_same_then_else)]
827 fn compare_value(
842 fn compare_value(
828 path_map: &TwoWayPathMap,
843 path_map: &TwoWayPathMap,
829 current_merge: Revision,
844 current_merge: Revision,
830 changes: &ChangedFiles,
845 changes: &ChangedFiles,
831 dest: &PathToken,
846 dest: &PathToken,
832 src_minor: &CopySource,
847 src_minor: &CopySource,
833 src_major: &CopySource,
848 src_major: &CopySource,
834 ) -> (MergePick, bool) {
849 ) -> (MergePick, bool) {
835 if src_major.rev == current_merge {
850 if src_major.rev == current_merge {
836 if src_minor.rev == current_merge {
851 if src_minor.rev == current_merge {
837 if src_major.path.is_none() {
852 if src_major.path.is_none() {
838 // We cannot get different copy information for both p1 and p2
853 // We cannot get different copy information for both p1 and p2
839 // from the same revision. Unless this was a
854 // from the same revision. Unless this was a
840 // deletion.
855 // deletion.
841 //
856 //
842 // However the deletion might come over different data on each
857 // However the deletion might come over different data on each
843 // branch.
858 // branch.
844 let need_over = src_major.overwritten != src_minor.overwritten;
859 let need_over = src_major.overwritten != src_minor.overwritten;
845 (MergePick::Any, need_over)
860 (MergePick::Any, need_over)
846 } else {
861 } else {
847 unreachable!();
862 unreachable!();
848 }
863 }
849 } else {
864 } else {
850 // The last value comes from the current merge; this value -will- win
865 // The last value comes from the current merge; this value -will- win
851 // eventually.
866 // eventually.
852 (MergePick::Major, true)
867 (MergePick::Major, true)
853 }
868 }
854 } else if src_minor.rev == current_merge {
869 } else if src_minor.rev == current_merge {
855 // The last value comes from the current merge; this value -will- win
870 // The last value comes from the current merge; this value -will- win
856 // eventually.
871 // eventually.
857 (MergePick::Minor, true)
872 (MergePick::Minor, true)
858 } else if src_major.path == src_minor.path {
873 } else if src_major.path == src_minor.path {
859 // we have the same value, but from another source;
874 // we have the same value, but from another source;
860 if src_major.rev == src_minor.rev {
875 if src_major.rev == src_minor.rev {
861 // If the two entries are identical, they are both valid
876 // If the two entries are identical, they are both valid
862 debug_assert!(src_minor.overwritten == src_major.overwritten);
877 debug_assert!(src_minor.overwritten == src_major.overwritten);
863 (MergePick::Any, false)
878 (MergePick::Any, false)
864 } else if src_major.is_overwritten_by(src_minor) {
879 } else if src_major.is_overwritten_by(src_minor) {
865 (MergePick::Minor, false)
880 (MergePick::Minor, false)
866 } else if src_minor.is_overwritten_by(src_major) {
881 } else if src_minor.is_overwritten_by(src_major) {
867 (MergePick::Major, false)
882 (MergePick::Major, false)
868 } else {
883 } else {
869 (MergePick::Any, true)
884 (MergePick::Any, true)
870 }
885 }
871 } else if src_major.rev == src_minor.rev {
886 } else if src_major.rev == src_minor.rev {
872 // We cannot get copy information for both p1 and p2 in the
887 // We cannot get copy information for both p1 and p2 in the
873 // same rev. So this is the same value.
888 // same rev. So this is the same value.
874 unreachable!(
889 unreachable!(
875 "conflicting information from p1 and p2 in the same revision"
890 "conflicting information from p1 and p2 in the same revision"
876 );
891 );
877 } else {
892 } else {
878 let dest_path = path_map.untokenize(*dest);
893 let dest_path = path_map.untokenize(*dest);
879 let action = changes.get_merge_case(dest_path);
894 let action = changes.get_merge_case(dest_path);
880 if src_minor.path.is_some()
895 if src_minor.path.is_some()
881 && src_major.path.is_none()
896 && src_major.path.is_none()
882 && action == MergeCase::Salvaged
897 && action == MergeCase::Salvaged
883 {
898 {
884 // If the file is "deleted" in the major side but was
899 // If the file is "deleted" in the major side but was
885 // salvaged by the merge, we keep the minor side alive
900 // salvaged by the merge, we keep the minor side alive
886 (MergePick::Minor, true)
901 (MergePick::Minor, true)
887 } else if src_major.path.is_some()
902 } else if src_major.path.is_some()
888 && src_minor.path.is_none()
903 && src_minor.path.is_none()
889 && action == MergeCase::Salvaged
904 && action == MergeCase::Salvaged
890 {
905 {
891 // If the file is "deleted" in the minor side but was
906 // If the file is "deleted" in the minor side but was
892 // salvaged by the merge, unconditionally preserve the
907 // salvaged by the merge, unconditionally preserve the
893 // major side.
908 // major side.
894 (MergePick::Major, true)
909 (MergePick::Major, true)
895 } else if src_minor.is_overwritten_by(src_major) {
910 } else if src_minor.is_overwritten_by(src_major) {
896 // The information from the minor version is strictly older than
911 // The information from the minor version is strictly older than
897 // the major version
912 // the major version
898 if action == MergeCase::Merged {
913 if action == MergeCase::Merged {
899 // If the file was actively merged, its means some non-copy
914 // If the file was actively merged, its means some non-copy
900 // activity happened on the other branch. It
915 // activity happened on the other branch. It
901 // mean the older copy information are still relevant.
916 // mean the older copy information are still relevant.
902 //
917 //
903 // The major side wins such conflict.
918 // The major side wins such conflict.
904 (MergePick::Major, true)
919 (MergePick::Major, true)
905 } else {
920 } else {
906 // No activity on the minor branch, pick the newer one.
921 // No activity on the minor branch, pick the newer one.
907 (MergePick::Major, false)
922 (MergePick::Major, false)
908 }
923 }
909 } else if src_major.is_overwritten_by(src_minor) {
924 } else if src_major.is_overwritten_by(src_minor) {
910 if action == MergeCase::Merged {
925 if action == MergeCase::Merged {
911 // If the file was actively merged, it means some non-copy
926 // If the file was actively merged, it means some non-copy
912 // activity happened on the other branch. It
927 // activity happened on the other branch. It
913 // means the older copy information is still relevant.
928 // means the older copy information is still relevant.
914 //
929 //
915 // The major side wins such conflict.
930 // The major side wins such conflict.
916 (MergePick::Major, true)
931 (MergePick::Major, true)
917 } else {
932 } else {
918 // No activity on the minor branch, pick the newer one.
933 // No activity on the minor branch, pick the newer one.
919 (MergePick::Minor, false)
934 (MergePick::Minor, false)
920 }
935 }
921 } else if src_minor.path.is_none() {
936 } else if src_minor.path.is_none() {
922 // the minor side has no relevant information, pick the alive one
937 // the minor side has no relevant information, pick the alive one
923 (MergePick::Major, true)
938 (MergePick::Major, true)
924 } else if src_major.path.is_none() {
939 } else if src_major.path.is_none() {
925 // the major side has no relevant information, pick the alive one
940 // the major side has no relevant information, pick the alive one
926 (MergePick::Minor, true)
941 (MergePick::Minor, true)
927 } else {
942 } else {
928 // by default the major side wins
943 // by default the major side wins
929 (MergePick::Major, true)
944 (MergePick::Major, true)
930 }
945 }
931 }
946 }
932 }
947 }
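To make the new CopySource equality (new lines 96-110) concrete, here is a minimal sketch written as a hypothetical test module inside this file; the module and test names are illustrative and not part of the change:

    #[cfg(test)]
    mod copy_source_eq_sketch {
        use super::*;

        #[test]
        fn equality_only_looks_at_rev() {
            // Two sources recorded at the same revision with the same data
            // compare equal, and the debug assertions in eq() also hold.
            let a = CopySource::new(1, Some(0));
            let b = CopySource::new(1, Some(0));
            assert!(a == b);

            // Overwriting bumps `rev`, so the result compares unequal to the
            // original without inspecting `path` or `overwritten`.
            let mut c = CopySource::new(1, Some(0));
            c.overwrite(2, None);
            assert!(a != c);
        }
    }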
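Similarly, a small illustrative sketch of the TwoWayPathMap round trip described in the "tokenizer" comment, again as a hypothetical test module (the example paths are arbitrary):

    #[cfg(test)]
    mod path_map_sketch {
        use super::*;

        #[test]
        fn tokenize_round_trip() {
            let mut map = TwoWayPathMap::default();
            let a = map.tokenize(HgPath::new(b"dir/file_a"));
            let b = map.tokenize(HgPath::new(b"dir/file_b"));
            // The same path always yields the same small integer token and
            // distinct paths yield distinct tokens...
            assert_eq!(a, map.tokenize(HgPath::new(b"dir/file_a")));
            assert_ne!(a, b);
            // ...and a token converts back to the path it was built from.
            assert_eq!(map.untokenize(a).as_bytes(), b"dir/file_a");
        }
    }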