@@ -1,552 +1,551 @@
 # Copyright (C) 2004, 2005 Canonical Ltd
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation; either version 2 of the License, or
 # (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, see <http://www.gnu.org/licenses/>.
 
 # mbp: "you know that thing where cvs gives you conflict markers?"
 # s: "i hate that."
 
 from __future__ import absolute_import
 
 from .i18n import _
-from .thirdparty import attr
 from . import (
     error,
     mdiff,
     pycompat,
 )
 from .utils import stringutil
 
 
 def intersect(ra, rb):
     """Given two ranges return the range where they intersect or None.
 
     >>> intersect((0, 10), (0, 6))
     (0, 6)
     >>> intersect((0, 10), (5, 15))
     (5, 10)
     >>> intersect((0, 10), (10, 15))
     >>> intersect((0, 9), (10, 15))
     >>> intersect((0, 9), (7, 15))
     (7, 9)
     """
     assert ra[0] <= ra[1]
     assert rb[0] <= rb[1]
 
     sa = max(ra[0], rb[0])
     sb = min(ra[1], rb[1])
     if sa < sb:
         return sa, sb
     else:
         return None
 
 
 def compare_range(a, astart, aend, b, bstart, bend):
     """Compare a[astart:aend] == b[bstart:bend], without slicing."""
     if (aend - astart) != (bend - bstart):
         return False
     for ia, ib in zip(
         pycompat.xrange(astart, aend), pycompat.xrange(bstart, bend)
     ):
         if a[ia] != b[ib]:
             return False
     else:
         return True
 
 
 class Merge3Text(object):
     """3-way merge of texts.
 
     Given strings BASE, OTHER, THIS, tries to produce a combined text
     incorporating the changes from both BASE->OTHER and BASE->THIS."""
 
     def __init__(self, basetext, atext, btext, base=None, a=None, b=None):
         self.basetext = basetext
         self.atext = atext
         self.btext = btext
         if base is None:
             base = mdiff.splitnewlines(basetext)
         if a is None:
             a = mdiff.splitnewlines(atext)
         if b is None:
             b = mdiff.splitnewlines(btext)
         self.base = base
         self.a = a
         self.b = b
 
     def merge_groups(self):
         """Yield sequence of line groups. Each one is a tuple:
 
         'unchanged', lines
              Lines unchanged from base
 
         'a', lines
              Lines taken from a
 
         'same', lines
              Lines taken from a (and equal to b)
 
         'b', lines
              Lines taken from b
 
         'conflict', (base_lines, a_lines, b_lines)
              Lines from base were changed to either a or b and conflict.
         """
         for t in self.merge_regions():
             what = t[0]
             if what == b'unchanged':
                 yield what, self.base[t[1] : t[2]]
             elif what == b'a' or what == b'same':
                 yield what, self.a[t[1] : t[2]]
             elif what == b'b':
                 yield what, self.b[t[1] : t[2]]
             elif what == b'conflict':
                 yield (
                     what,
                     (
                         self.base[t[1] : t[2]],
                         self.a[t[3] : t[4]],
                         self.b[t[5] : t[6]],
                     ),
                 )
             else:
                 raise ValueError(what)
 
     def merge_regions(self):
         """Return sequences of matching and conflicting regions.
 
         This returns tuples, where the first value says what kind we
         have:
 
         'unchanged', start, end
              Take a region of base[start:end]
 
         'same', astart, aend
              b and a are different from base but give the same result
 
         'a', start, end
              Non-clashing insertion from a[start:end]
 
         'conflict', zstart, zend, astart, aend, bstart, bend
              Conflict between a and b, with z as common ancestor
 
         Method is as follows:
 
         The two sequences align only on regions which match the base
         and both descendants. These are found by doing a two-way diff
         of each one against the base, and then finding the
         intersections between those regions. These "sync regions"
         are by definition unchanged in both and easily dealt with.
 
         The regions in between can be in any of three cases:
         conflicted, or changed on only one side.
         """
 
         # section a[0:ia] has been disposed of, etc
         iz = ia = ib = 0
 
         for region in self.find_sync_regions():
             zmatch, zend, amatch, aend, bmatch, bend = region
             # print 'match base [%d:%d]' % (zmatch, zend)
 
             matchlen = zend - zmatch
             assert matchlen >= 0
             assert matchlen == (aend - amatch)
             assert matchlen == (bend - bmatch)
 
             len_a = amatch - ia
             len_b = bmatch - ib
             len_base = zmatch - iz
             assert len_a >= 0
             assert len_b >= 0
             assert len_base >= 0
 
             # print 'unmatched a=%d, b=%d' % (len_a, len_b)
 
             if len_a or len_b:
                 # try to avoid actually slicing the lists
                 equal_a = compare_range(
                     self.a, ia, amatch, self.base, iz, zmatch
                 )
                 equal_b = compare_range(
                     self.b, ib, bmatch, self.base, iz, zmatch
                 )
                 same = compare_range(self.a, ia, amatch, self.b, ib, bmatch)
 
                 if same:
                     yield b'same', ia, amatch
                 elif equal_a and not equal_b:
                     yield b'b', ib, bmatch
                 elif equal_b and not equal_a:
                     yield b'a', ia, amatch
                 elif not equal_a and not equal_b:
                     yield b'conflict', iz, zmatch, ia, amatch, ib, bmatch
                 else:
                     raise AssertionError(b"can't handle a=b=base but unmatched")
 
             ia = amatch
             ib = bmatch
             iz = zmatch
 
             # if the same part of the base was deleted on both sides
             # that's OK, we can just skip it.
 
             if matchlen > 0:
                 assert ia == amatch
                 assert ib == bmatch
                 assert iz == zmatch
 
                 yield b'unchanged', zmatch, zend
                 iz = zend
                 ia = aend
                 ib = bend
 
     def find_sync_regions(self):
         """Return a list of sync regions, where both descendants match the base.
 
         Generates a list of (base1, base2, a1, a2, b1, b2). There is
         always a zero-length sync region at the end of all the files.
         """
 
         ia = ib = 0
         amatches = mdiff.get_matching_blocks(self.basetext, self.atext)
         bmatches = mdiff.get_matching_blocks(self.basetext, self.btext)
         len_a = len(amatches)
         len_b = len(bmatches)
 
         sl = []
 
         while ia < len_a and ib < len_b:
             abase, amatch, alen = amatches[ia]
             bbase, bmatch, blen = bmatches[ib]
 
             # there is an unconflicted block at i; how long does it
             # extend? until whichever one ends earlier.
             i = intersect((abase, abase + alen), (bbase, bbase + blen))
             if i:
                 intbase = i[0]
                 intend = i[1]
                 intlen = intend - intbase
 
                 # found a match of base[i[0], i[1]]; this may be less than
                 # the region that matches in either one
                 assert intlen <= alen
                 assert intlen <= blen
                 assert abase <= intbase
                 assert bbase <= intbase
 
                 asub = amatch + (intbase - abase)
                 bsub = bmatch + (intbase - bbase)
                 aend = asub + intlen
                 bend = bsub + intlen
 
                 assert self.base[intbase:intend] == self.a[asub:aend], (
                     self.base[intbase:intend],
                     self.a[asub:aend],
                 )
 
                 assert self.base[intbase:intend] == self.b[bsub:bend]
 
                 sl.append((intbase, intend, asub, aend, bsub, bend))
 
             # advance whichever one ends first in the base text
             if (abase + alen) < (bbase + blen):
                 ia += 1
             else:
                 ib += 1
 
         intbase = len(self.base)
         abase = len(self.a)
         bbase = len(self.b)
         sl.append((intbase, intbase, abase, abase, bbase, bbase))
 
         return sl
 
 
 def _verifytext(text, path, ui, quiet=False, allow_binary=False):
     """verifies that text is non-binary (unless opts[text] is passed,
     then we just warn)"""
     if stringutil.binary(text):
         msg = _(b"%s looks like a binary file.") % path
         if not quiet:
             ui.warn(_(b'warning: %s\n') % msg)
         if not allow_binary:
             raise error.Abort(msg)
     return text
 
 
 def _format_labels(*inputs):
     pad = max(len(input.label) if input.label else 0 for input in inputs)
     labels = []
     for input in inputs:
         if input.label:
             if input.label_detail:
                 label = (
                     (input.label + b':').ljust(pad + 1)
                     + b' '
                     + input.label_detail
                 )
             else:
                 label = input.label
             # 8 for the prefix of conflict marker lines (e.g. '<<<<<<< ')
             labels.append(stringutil.ellipsis(label, 80 - 8))
         else:
             labels.append(None)
     return labels
 
 
 def _detect_newline(m3):
     if len(m3.a) > 0:
         if m3.a[0].endswith(b'\r\n'):
             return b'\r\n'
         elif m3.a[0].endswith(b'\r'):
             return b'\r'
     return b'\n'
 
 
 def _minimize(a_lines, b_lines):
     """Trim conflict regions of lines where A and B sides match.
 
     Lines where both A and B have made the same changes at the beginning
     or the end of each merge region are eliminated from the conflict
     region and are instead considered the same.
     """
     alen = len(a_lines)
     blen = len(b_lines)
 
     # find matches at the front
     ii = 0
     while ii < alen and ii < blen and a_lines[ii] == b_lines[ii]:
         ii += 1
     startmatches = ii
 
     # find matches at the end
     ii = 0
     while ii < alen and ii < blen and a_lines[-ii - 1] == b_lines[-ii - 1]:
         ii += 1
     endmatches = ii
 
     lines_before = a_lines[:startmatches]
     new_a_lines = a_lines[startmatches : alen - endmatches]
     new_b_lines = b_lines[startmatches : blen - endmatches]
     lines_after = a_lines[alen - endmatches :]
     return lines_before, new_a_lines, new_b_lines, lines_after
 
 
 def render_minimized(
     m3,
     name_a=None,
     name_b=None,
     start_marker=b'<<<<<<<',
     mid_marker=b'=======',
     end_marker=b'>>>>>>>',
 ):
     """Return merge in cvs-like form."""
     newline = _detect_newline(m3)
     conflicts = False
     if name_a:
         start_marker = start_marker + b' ' + name_a
     if name_b:
         end_marker = end_marker + b' ' + name_b
     merge_groups = m3.merge_groups()
     lines = []
     for what, group_lines in merge_groups:
         if what == b'conflict':
             conflicts = True
             base_lines, a_lines, b_lines = group_lines
             minimized = _minimize(a_lines, b_lines)
             lines_before, a_lines, b_lines, lines_after = minimized
             lines.extend(lines_before)
             lines.append(start_marker + newline)
             lines.extend(a_lines)
             lines.append(mid_marker + newline)
             lines.extend(b_lines)
             lines.append(end_marker + newline)
             lines.extend(lines_after)
         else:
             lines.extend(group_lines)
     return lines, conflicts
 
 
 def render_merge3(m3, name_a, name_b, name_base):
     """Render conflicts as 3-way conflict markers."""
     newline = _detect_newline(m3)
     conflicts = False
     lines = []
     for what, group_lines in m3.merge_groups():
         if what == b'conflict':
             base_lines, a_lines, b_lines = group_lines
             conflicts = True
             lines.append(b'<<<<<<< ' + name_a + newline)
             lines.extend(a_lines)
             lines.append(b'||||||| ' + name_base + newline)
             lines.extend(base_lines)
             lines.append(b'=======' + newline)
             lines.extend(b_lines)
             lines.append(b'>>>>>>> ' + name_b + newline)
         else:
             lines.extend(group_lines)
     return lines, conflicts
 
 
 def render_mergediff(m3, name_a, name_b, name_base):
     """Render conflicts as conflict markers with one snapshot and one diff."""
     newline = _detect_newline(m3)
     lines = []
     conflicts = False
     for what, group_lines in m3.merge_groups():
         if what == b'conflict':
             base_lines, a_lines, b_lines = group_lines
             base_text = b''.join(base_lines)
             b_blocks = list(
                 mdiff.allblocks(
                     base_text,
                     b''.join(b_lines),
                     lines1=base_lines,
                     lines2=b_lines,
                 )
             )
             a_blocks = list(
                 mdiff.allblocks(
                     base_text,
                     b''.join(a_lines),
                     lines1=base_lines,
                     lines2=b_lines,
                 )
             )
 
             def matching_lines(blocks):
                 return sum(
                     block[1] - block[0]
                     for block, kind in blocks
                     if kind == b'='
                 )
 
             def diff_lines(blocks, lines1, lines2):
                 for block, kind in blocks:
                     if kind == b'=':
                         for line in lines1[block[0] : block[1]]:
                             yield b' ' + line
                     else:
                         for line in lines1[block[0] : block[1]]:
                             yield b'-' + line
                         for line in lines2[block[2] : block[3]]:
                             yield b'+' + line
 
             lines.append(b"<<<<<<<" + newline)
             if matching_lines(a_blocks) < matching_lines(b_blocks):
                 lines.append(b"======= " + name_a + newline)
                 lines.extend(a_lines)
                 lines.append(b"------- " + name_base + newline)
                 lines.append(b"+++++++ " + name_b + newline)
                 lines.extend(diff_lines(b_blocks, base_lines, b_lines))
             else:
                 lines.append(b"------- " + name_base + newline)
                 lines.append(b"+++++++ " + name_a + newline)
                 lines.extend(diff_lines(a_blocks, base_lines, a_lines))
                 lines.append(b"======= " + name_b + newline)
                 lines.extend(b_lines)
             lines.append(b">>>>>>>" + newline)
             conflicts = True
         else:
             lines.extend(group_lines)
     return lines, conflicts
 
 
 def _resolve(m3, sides):
     lines = []
     for what, group_lines in m3.merge_groups():
         if what == b'conflict':
             for side in sides:
                 lines.extend(group_lines[side])
         else:
             lines.extend(group_lines)
     return lines
 
 
-@attr.s
 class MergeInput(object):
-    fctx = attr.ib()
-    label = attr.ib(default=None)
-    # If the "detail" part is set, then that is rendered after the label and
-    # separated by a ':'. The label is padded to make the ':' aligned among
-    # merge inputs.
-    label_detail = attr.ib(default=None)
+    def __init__(self, fctx, label=None, label_detail=None):
+        self.fctx = fctx
+        self.label = label
+        # If the "detail" part is set, then that is rendered after the label and
+        # separated by a ':'. The label is padded to make the ':' aligned among
+        # all merge inputs.
+        self.label_detail = label_detail
 
 
 def simplemerge(
     ui,
     local,
     base,
     other,
     mode=b'merge',
     quiet=False,
     allow_binary=False,
     print_result=False,
 ):
     """Performs the simplemerge algorithm.
 
     The merged result is written into `localctx`.
     """
 
     def readctx(ctx):
         # Merges were always run in the working copy before, which means
         # they used decoded data, if the user defined any repository
         # filters.
         #
         # Maintain that behavior today for BC, though perhaps in the future
         # it'd be worth considering whether merging encoded data (what the
         # repository usually sees) might be more useful.
         return _verifytext(
             ctx.decodeddata(),
             ctx.path(),
             ui,
             quiet=quiet,
             allow_binary=allow_binary,
         )
 
     try:
         localtext = readctx(local.fctx)
         basetext = readctx(base.fctx)
         othertext = readctx(other.fctx)
     except error.Abort:
         return True
 
     m3 = Merge3Text(basetext, localtext, othertext)
     conflicts = False
     if mode == b'union':
         lines = _resolve(m3, (1, 2))
     elif mode == b'local':
         lines = _resolve(m3, (1,))
     elif mode == b'other':
         lines = _resolve(m3, (2,))
     else:
         if mode == b'mergediff':
             labels = _format_labels(local, other, base)
             lines, conflicts = render_mergediff(m3, *labels)
         elif mode == b'merge3':
             labels = _format_labels(local, other, base)
             lines, conflicts = render_merge3(m3, *labels)
         else:
             labels = _format_labels(local, other)
             lines, conflicts = render_minimized(m3, *labels)
 
     mergedtext = b''.join(lines)
     if print_result:
         ui.fout.write(mergedtext)
     else:
         # local.fctx.flags() already has the merged flags (done in
         # mergestate.resolve())
         local.fctx.write(mergedtext, local.fctx.flags())
 
     return conflicts
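
The only functional change in the hunk above is that MergeInput no longer relies on the vendored attr package: the `from .thirdparty import attr` import is dropped and the class gains a plain `__init__`; the rest of the hunk is unchanged context. For orientation, a minimal usage sketch of the API shown in the diff follows. The Merge3Text and render_minimized signatures are taken from the code above; the `mercurial.simplemerge` import path and the presence of a Python 3 Mercurial installation are assumptions, not something this diff states.

# Usage sketch, not part of the change above.
from mercurial import simplemerge  # assumed import path for this module

base = b'one\ntwo\nthree\n'
local = b'one\ntwo (local)\nthree\n'
other = b'one\ntwo (other)\nthree\n'

# Merge3Text(basetext, atext, btext) splits the inputs into lines itself.
m3 = simplemerge.Merge3Text(base, local, other)

# merge_groups() yields (kind, lines) pairs; kind is one of b'unchanged',
# b'a', b'b', b'same' or b'conflict' (a conflict carries a 3-tuple of lines).
for kind, lines in m3.merge_groups():
    print(kind, lines)

# render_minimized() returns the merged lines plus a conflict flag.
lines, conflicts = simplemerge.render_minimized(
    m3, name_a=b'local', name_b=b'other'
)
print(b''.join(lines).decode())
print('conflicts:', conflicts)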