xdiff: resolve signed unsigned comparison warning...
Jun Wu, r36844:e5b14f5b (default)
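For context, the warning class this commit resolves comes from C's usual arithmetic conversions: a mix of int64_t and uint64_t operands is evaluated as uint64_t, and comparing that result against a signed variable draws -Wsign-compare. A minimal sketch (illustrative only, not code from this commit; the names mirror the hunk below):

    #include <stdint.h>

    int too_far(int64_t i1, uint64_t ignored, int64_t max_common)
    {
        /* i1 + ignored is uint64_t, so this compares unsigned
         * against signed and gcc/clang warn under -Wsign-compare: */
        if (i1 + ignored > max_common)  /* warning */
            return 1;

        /* Comparing against the literal 0 is warning-free, since a
         * non-negative constant converts cleanly to unsigned: */
        if (i1 + ignored > 0)           /* no warning */
            return 1;

        return 0;
    }

Since max_common and max_ignorable are only ever 0 in xdl_get_hunk(), the change below deletes them and compares against the literal 0 directly.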
@@ -1,1132 +1,1130 @@
/*
 * LibXDiff by Davide Libenzi ( File Differential Library )
 * Copyright (C) 2003 Davide Libenzi
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include "xinclude.h"


#define XDL_MAX_COST_MIN 256
#define XDL_HEUR_MIN_COST 256
#define XDL_LINE_MAX (long)((1UL << (CHAR_BIT * sizeof(long) - 1)) - 1)
#define XDL_SNAKE_CNT 20
#define XDL_K_HEUR 4

/* VC 2008 doesn't know about the inline keyword. */
#if defined(_MSC_VER)
#define inline __forceinline
#endif


typedef struct s_xdpsplit {
    int64_t i1, i2;
    int min_lo, min_hi;
} xdpsplit_t;


static int64_t xdl_split(uint64_t const *ha1, int64_t off1, int64_t lim1,
                         uint64_t const *ha2, int64_t off2, int64_t lim2,
                         int64_t *kvdf, int64_t *kvdb, int need_min, xdpsplit_t *spl,
                         xdalgoenv_t *xenv);
static xdchange_t *xdl_add_change(xdchange_t *xscr, int64_t i1, int64_t i2, int64_t chg1, int64_t chg2);


/*
 * See "An O(ND) Difference Algorithm and its Variations", by Eugene Myers.
 * Basically it considers a "box" (off1, off2, lim1, lim2) and scans from
 * both ends: the forward diagonal starting from (off1, off2) and the
 * backward diagonal starting from (lim1, lim2). When the K values on the
 * same diagonal cross, it returns the furthest point of reach. Running the
 * algorithm in full can be expensive in some cases, so a little bit of
 * heuristic is needed to cut the search and to return a suboptimal point.
 */
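/*
 * Editor's note (illustrative, not in the original source): a diagonal is
 * addressed by d = i1 - i2. kvdf[d] records the furthest i1 reached on
 * diagonal d by the forward scan, kvdb[d] the smallest i1 reached by the
 * backward scan. E.g. with off1 = off2 = 0, a forward path that has consumed
 * 5 lines of file 1 and 3 lines of file 2 sits on diagonal d = 5 - 3 = 2
 * with kvdf[2] = 5. The scans meet when, on some diagonal d,
 * kvdb[d] <= kvdf[d], which is exactly the crossing test in the loops below.
 */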
static int64_t xdl_split(uint64_t const *ha1, int64_t off1, int64_t lim1,
                         uint64_t const *ha2, int64_t off2, int64_t lim2,
                         int64_t *kvdf, int64_t *kvdb, int need_min, xdpsplit_t *spl,
                         xdalgoenv_t *xenv) {
    int64_t dmin = off1 - lim2, dmax = lim1 - off2;
    int64_t fmid = off1 - off2, bmid = lim1 - lim2;
    int64_t odd = (fmid - bmid) & 1;
    int64_t fmin = fmid, fmax = fmid;
    int64_t bmin = bmid, bmax = bmid;
    int64_t ec, d, i1, i2, prev1, best, dd, v, k;

    /*
     * Set initial diagonal values for both forward and backward path.
     */
    kvdf[fmid] = off1;
    kvdb[bmid] = lim1;

    for (ec = 1;; ec++) {
        int got_snake = 0;

        /*
         * We need to extend the diagonal "domain" by one. If the next
         * value exits the box boundaries we need to change it in the
         * opposite direction because (max - min) must be a power of two.
         * Also we initialize the external K value to -1 so that we can
         * avoid extra condition checks inside the core loop.
         */
        if (fmin > dmin)
            kvdf[--fmin - 1] = -1;
        else
            ++fmin;
        if (fmax < dmax)
            kvdf[++fmax + 1] = -1;
        else
            --fmax;

        for (d = fmax; d >= fmin; d -= 2) {
            if (kvdf[d - 1] >= kvdf[d + 1])
                i1 = kvdf[d - 1] + 1;
            else
                i1 = kvdf[d + 1];
            prev1 = i1;
            i2 = i1 - d;
            for (; i1 < lim1 && i2 < lim2 && ha1[i1] == ha2[i2]; i1++, i2++);
            if (i1 - prev1 > xenv->snake_cnt)
                got_snake = 1;
            kvdf[d] = i1;
            if (odd && bmin <= d && d <= bmax && kvdb[d] <= i1) {
                spl->i1 = i1;
                spl->i2 = i2;
                spl->min_lo = spl->min_hi = 1;
                return ec;
            }
        }

        /*
         * We need to extend the diagonal "domain" by one. If the next
         * value exits the box boundaries we need to change it in the
         * opposite direction because (max - min) must be a power of two.
         * Also we initialize the external K value to XDL_LINE_MAX so that
         * we can avoid extra condition checks inside the core loop.
         */
        if (bmin > dmin)
            kvdb[--bmin - 1] = XDL_LINE_MAX;
        else
            ++bmin;
        if (bmax < dmax)
            kvdb[++bmax + 1] = XDL_LINE_MAX;
        else
            --bmax;

        for (d = bmax; d >= bmin; d -= 2) {
            if (kvdb[d - 1] < kvdb[d + 1])
                i1 = kvdb[d - 1];
            else
                i1 = kvdb[d + 1] - 1;
            prev1 = i1;
            i2 = i1 - d;
            for (; i1 > off1 && i2 > off2 && ha1[i1 - 1] == ha2[i2 - 1]; i1--, i2--);
            if (prev1 - i1 > xenv->snake_cnt)
                got_snake = 1;
            kvdb[d] = i1;
            if (!odd && fmin <= d && d <= fmax && i1 <= kvdf[d]) {
                spl->i1 = i1;
                spl->i2 = i2;
                spl->min_lo = spl->min_hi = 1;
                return ec;
            }
        }

        if (need_min)
            continue;

        /*
         * If the edit cost is above the heuristic trigger and if
         * we got a good snake, we sample the current diagonals to see
         * if some of them have reached an "interesting" path. Our
         * measure is a function of the distance from the diagonal
         * corner (i1 + i2) penalized with the distance from the
         * mid diagonal itself. If this value is above the current
         * edit cost times a magic factor (XDL_K_HEUR) we consider
         * it interesting.
         */
        if (got_snake && ec > xenv->heur_min) {
            for (best = 0, d = fmax; d >= fmin; d -= 2) {
                dd = d > fmid ? d - fmid : fmid - d;
                i1 = kvdf[d];
                i2 = i1 - d;
                v = (i1 - off1) + (i2 - off2) - dd;

                if (v > XDL_K_HEUR * ec && v > best &&
                    off1 + xenv->snake_cnt <= i1 && i1 < lim1 &&
                    off2 + xenv->snake_cnt <= i2 && i2 < lim2) {
                    for (k = 1; ha1[i1 - k] == ha2[i2 - k]; k++)
                        if (k == xenv->snake_cnt) {
                            best = v;
                            spl->i1 = i1;
                            spl->i2 = i2;
                            break;
                        }
                }
            }
            if (best > 0) {
                spl->min_lo = 1;
                spl->min_hi = 0;
                return ec;
            }

            for (best = 0, d = bmax; d >= bmin; d -= 2) {
                dd = d > bmid ? d - bmid : bmid - d;
                i1 = kvdb[d];
                i2 = i1 - d;
                v = (lim1 - i1) + (lim2 - i2) - dd;

                if (v > XDL_K_HEUR * ec && v > best &&
                    off1 < i1 && i1 <= lim1 - xenv->snake_cnt &&
                    off2 < i2 && i2 <= lim2 - xenv->snake_cnt) {
                    for (k = 0; ha1[i1 + k] == ha2[i2 + k]; k++)
                        if (k == xenv->snake_cnt - 1) {
                            best = v;
                            spl->i1 = i1;
                            spl->i2 = i2;
                            break;
                        }
                }
            }
            if (best > 0) {
                spl->min_lo = 0;
                spl->min_hi = 1;
                return ec;
            }
        }

        /*
         * Enough is enough. We spent too much time here and now we
         * collect the furthest reaching path using the (i1 + i2)
         * measure.
         */
        if (ec >= xenv->mxcost) {
            int64_t fbest, fbest1, bbest, bbest1;

            fbest = fbest1 = -1;
            for (d = fmax; d >= fmin; d -= 2) {
                i1 = XDL_MIN(kvdf[d], lim1);
                i2 = i1 - d;
                if (lim2 < i2)
                    i1 = lim2 + d, i2 = lim2;
                if (fbest < i1 + i2) {
                    fbest = i1 + i2;
                    fbest1 = i1;
                }
            }

            bbest = bbest1 = XDL_LINE_MAX;
            for (d = bmax; d >= bmin; d -= 2) {
                i1 = XDL_MAX(off1, kvdb[d]);
                i2 = i1 - d;
                if (i2 < off2)
                    i1 = off2 + d, i2 = off2;
                if (i1 + i2 < bbest) {
                    bbest = i1 + i2;
                    bbest1 = i1;
                }
            }

            if ((lim1 + lim2) - bbest < fbest - (off1 + off2)) {
                spl->i1 = fbest1;
                spl->i2 = fbest - fbest1;
                spl->min_lo = 1;
                spl->min_hi = 0;
            } else {
                spl->i1 = bbest1;
                spl->i2 = bbest - bbest1;
                spl->min_lo = 0;
                spl->min_hi = 1;
            }
            return ec;
        }
    }
}

/*
 * Rule: "Divide et Impera" (divide and conquer). Recursively split the box
 * into sub-boxes by calling the box splitting function. Note that the real
 * job (marking changed lines) is done in the two boundary reaching checks.
 */
int xdl_recs_cmp(diffdata_t *dd1, int64_t off1, int64_t lim1,
                 diffdata_t *dd2, int64_t off2, int64_t lim2,
                 int64_t *kvdf, int64_t *kvdb, int need_min, xdalgoenv_t *xenv) {
    uint64_t const *ha1 = dd1->ha, *ha2 = dd2->ha;

    /*
     * Shrink the box by walking through each diagonal snake (SW and NE).
     */
    for (; off1 < lim1 && off2 < lim2 && ha1[off1] == ha2[off2]; off1++, off2++);
    for (; off1 < lim1 && off2 < lim2 && ha1[lim1 - 1] == ha2[lim2 - 1]; lim1--, lim2--);

    /*
     * If one dimension is empty, then all records on the other one must
     * be obviously changed.
     */
    if (off1 == lim1) {
        char *rchg2 = dd2->rchg;
        int64_t *rindex2 = dd2->rindex;

        for (; off2 < lim2; off2++)
            rchg2[rindex2[off2]] = 1;
    } else if (off2 == lim2) {
        char *rchg1 = dd1->rchg;
        int64_t *rindex1 = dd1->rindex;

        for (; off1 < lim1; off1++)
            rchg1[rindex1[off1]] = 1;
    } else {
        xdpsplit_t spl;
        spl.i1 = spl.i2 = 0;

        /*
         * Divide ...
         */
        if (xdl_split(ha1, off1, lim1, ha2, off2, lim2, kvdf, kvdb,
                      need_min, &spl, xenv) < 0) {

            return -1;
        }

        /*
         * ... et Impera.
         */
        if (xdl_recs_cmp(dd1, off1, spl.i1, dd2, off2, spl.i2,
                         kvdf, kvdb, spl.min_lo, xenv) < 0 ||
            xdl_recs_cmp(dd1, spl.i1, lim1, dd2, spl.i2, lim2,
                         kvdf, kvdb, spl.min_hi, xenv) < 0) {

            return -1;
        }
    }

    return 0;
}
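/*
 * Editor's note (illustrative, not in the original source): xdl_split()
 * picks one point (spl.i1, spl.i2) on an optimal path through the box, and
 * the two recursive calls above then diff the sub-boxes (off1..spl.i1,
 * off2..spl.i2) and (spl.i1..lim1, spl.i2..lim2) independently. The
 * recursion bottoms out in the two "one dimension is empty" branches, which
 * are the only places that actually mark lines as changed.
 */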

int xdl_do_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
                xdfenv_t *xe) {
    int64_t ndiags;
    int64_t *kvd, *kvdf, *kvdb;
    xdalgoenv_t xenv;
    diffdata_t dd1, dd2;

    if (xdl_prepare_env(mf1, mf2, xpp, xe) < 0) {

        return -1;
    }

    /*
     * Allocate and set up K vectors to be used by the differential
     * algorithm. One is to store the forward path and one to store the
     * backward path.
     */
    ndiags = xe->xdf1.nreff + xe->xdf2.nreff + 3;
    if (!(kvd = (int64_t *) xdl_malloc((2 * ndiags + 2) * sizeof(int64_t)))) {

        xdl_free_env(xe);
        return -1;
    }
    kvdf = kvd;
    kvdb = kvdf + ndiags;
    kvdf += xe->xdf2.nreff + 1;
    kvdb += xe->xdf2.nreff + 1;

    xenv.mxcost = xdl_bogosqrt(ndiags);
    if (xenv.mxcost < XDL_MAX_COST_MIN)
        xenv.mxcost = XDL_MAX_COST_MIN;
    xenv.snake_cnt = XDL_SNAKE_CNT;
    xenv.heur_min = XDL_HEUR_MIN_COST;

    dd1.nrec = xe->xdf1.nreff;
    dd1.ha = xe->xdf1.ha;
    dd1.rchg = xe->xdf1.rchg;
    dd1.rindex = xe->xdf1.rindex;
    dd2.nrec = xe->xdf2.nreff;
    dd2.ha = xe->xdf2.ha;
    dd2.rchg = xe->xdf2.rchg;
    dd2.rindex = xe->xdf2.rindex;

    if (xdl_recs_cmp(&dd1, 0, dd1.nrec, &dd2, 0, dd2.nrec,
                     kvdf, kvdb, (xpp->flags & XDF_NEED_MINIMAL) != 0, &xenv) < 0) {

        xdl_free(kvd);
        xdl_free_env(xe);
        return -1;
    }

    xdl_free(kvd);

    return 0;
}


static xdchange_t *xdl_add_change(xdchange_t *xscr, int64_t i1, int64_t i2, int64_t chg1, int64_t chg2) {
    xdchange_t *xch;

    if (!(xch = (xdchange_t *) xdl_malloc(sizeof(xdchange_t))))
        return NULL;

    xch->next = xscr;
    xch->i1 = i1;
    xch->i2 = i2;
    xch->chg1 = chg1;
    xch->chg2 = chg2;
    xch->ignore = 0;

    return xch;
}


static int recs_match(xrecord_t *rec1, xrecord_t *rec2)
{
    return (rec1->ha == rec2->ha &&
            xdl_recmatch(rec1->ptr, rec1->size,
                         rec2->ptr, rec2->size));
}

/*
 * If a line is indented more than this, get_indent() just returns this
 * value. This avoids having to do absurd amounts of work for data that are
 * not human-readable text, and also ensures that the output of get_indent
 * fits within an int.
 */
#define MAX_INDENT 200

/*
 * Return the amount of indentation of the specified line, treating TAB as 8
 * columns. Return -1 if the line is empty or contains only whitespace. Clamp
 * the output value at MAX_INDENT.
 */
static int get_indent(xrecord_t *rec)
{
    int64_t i;
    int ret = 0;

    for (i = 0; i < rec->size; i++) {
        char c = rec->ptr[i];

        if (!XDL_ISSPACE(c))
            return ret;
        else if (c == ' ')
            ret += 1;
        else if (c == '\t')
            ret += 8 - ret % 8;
        /* ignore other whitespace characters */

        if (ret >= MAX_INDENT)
            return MAX_INDENT;
    }

    /* The line contains only whitespace. */
    return -1;
}
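/*
 * Editor's note, a worked example: for a line starting " \t x", the loop
 * above sees ' ' (ret = 1), then '\t' (ret += 8 - 1 % 8, advancing to the
 * next tab stop, ret = 8), then ' ' (ret = 9), and returns 9 on reaching
 * the non-whitespace 'x'.
 */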

/*
 * If more than this number of consecutive blank rows are found, just return
 * this value. This avoids requiring O(N^2) work for pathological cases, and
 * also ensures that the output of score_split fits in an int.
 */
#define MAX_BLANKS 20

/* Characteristics measured about a hypothetical split position. */
struct split_measurement {
    /*
     * Is the split at the end of the file (aside from any blank lines)?
     */
    int end_of_file;

    /*
     * How much is the line immediately following the split indented (or -1
     * if the line is blank)?
     */
    int indent;

    /*
     * How many consecutive lines above the split are blank?
     */
    int pre_blank;

    /*
     * How much is the nearest non-blank line above the split indented (or
     * -1 if there is no such line)?
     */
    int pre_indent;

    /*
     * How many lines after the line following the split are blank?
     */
    int post_blank;

    /*
     * How much is the nearest non-blank line after the line following the
     * split indented (or -1 if there is no such line)?
     */
    int post_indent;
};

struct split_score {
    /* The effective indent of this split (smaller is preferred). */
    int effective_indent;

    /* Penalty for this split (smaller is preferred). */
    int penalty;
};

/*
 * Fill m with information about a hypothetical split of xdf above line split.
 */
static void measure_split(const xdfile_t *xdf, int64_t split,
                          struct split_measurement *m)
{
    int64_t i;

    if (split >= xdf->nrec) {
        m->end_of_file = 1;
        m->indent = -1;
    } else {
        m->end_of_file = 0;
        m->indent = get_indent(xdf->recs[split]);
    }

    m->pre_blank = 0;
    m->pre_indent = -1;
    for (i = split - 1; i >= 0; i--) {
        m->pre_indent = get_indent(xdf->recs[i]);
        if (m->pre_indent != -1)
            break;
        m->pre_blank += 1;
        if (m->pre_blank == MAX_BLANKS) {
            m->pre_indent = 0;
            break;
        }
    }

    m->post_blank = 0;
    m->post_indent = -1;
    for (i = split + 1; i < xdf->nrec; i++) {
        m->post_indent = get_indent(xdf->recs[i]);
        if (m->post_indent != -1)
            break;
        m->post_blank += 1;
        if (m->post_blank == MAX_BLANKS) {
            m->post_indent = 0;
            break;
        }
    }
}

/*
 * The empirically-determined weight factors used by score_split() below.
 * Larger values mean that the position is a less favorable place to split.
 *
 * Note that scores are only ever compared against each other, so multiplying
 * all of these weight/penalty values by the same factor wouldn't change the
 * heuristic's behavior. Still, we need to set that arbitrary scale *somehow*.
 * In practice, these numbers are chosen to be large enough that they can be
 * adjusted relative to each other with sufficient precision despite using
 * integer math.
 */

/* Penalty if there are no non-blank lines before the split */
#define START_OF_FILE_PENALTY 1

/* Penalty if there are no non-blank lines after the split */
#define END_OF_FILE_PENALTY 21

/* Multiplier for the number of blank lines around the split */
#define TOTAL_BLANK_WEIGHT (-30)

/* Multiplier for the number of blank lines after the split */
#define POST_BLANK_WEIGHT 6

/*
 * Penalties applied if the line is indented more than its predecessor
 */
#define RELATIVE_INDENT_PENALTY (-4)
#define RELATIVE_INDENT_WITH_BLANK_PENALTY 10

/*
 * Penalties applied if the line is indented less than both its predecessor
 * and its successor
 */
#define RELATIVE_OUTDENT_PENALTY 24
#define RELATIVE_OUTDENT_WITH_BLANK_PENALTY 17

/*
 * Penalties applied if the line is indented less than its predecessor but
 * not less than its successor
 */
#define RELATIVE_DEDENT_PENALTY 23
#define RELATIVE_DEDENT_WITH_BLANK_PENALTY 17

/*
 * We only consider whether the sum of the effective indents for one split is
 * less than (-1), equal to (0), or greater than (+1) that of the other. The
 * resulting value is multiplied by the following weight and combined with
 * the penalty to determine the better of two scores.
 */
#define INDENT_WEIGHT 60

/*
 * Compute a badness score for the hypothetical split whose measurements are
 * stored in m. The weight factors were determined empirically using the
 * tools and corpus described in
 *
 *     https://github.com/mhagger/diff-slider-tools
 *
 * Also see that project if you want to improve the weights based on, for
 * example, a larger or more diverse corpus.
 */
static void score_add_split(const struct split_measurement *m, struct split_score *s)
{
    /*
     * A place to accumulate penalty factors (positive makes this index more
     * favored):
     */
    int post_blank, total_blank, indent, any_blanks;

    if (m->pre_indent == -1 && m->pre_blank == 0)
        s->penalty += START_OF_FILE_PENALTY;

    if (m->end_of_file)
        s->penalty += END_OF_FILE_PENALTY;

    /*
     * Set post_blank to the number of blank lines following the split,
     * including the line immediately after the split:
     */
    post_blank = (m->indent == -1) ? 1 + m->post_blank : 0;
    total_blank = m->pre_blank + post_blank;

    /* Penalties based on nearby blank lines: */
    s->penalty += TOTAL_BLANK_WEIGHT * total_blank;
    s->penalty += POST_BLANK_WEIGHT * post_blank;

    if (m->indent != -1)
        indent = m->indent;
    else
        indent = m->post_indent;

    any_blanks = (total_blank != 0);

    /* Note that the effective indent is -1 at the end of the file: */
    s->effective_indent += indent;

    if (indent == -1) {
        /* No additional adjustments needed. */
    } else if (m->pre_indent == -1) {
        /* No additional adjustments needed. */
    } else if (indent > m->pre_indent) {
        /*
         * The line is indented more than its predecessor.
         */
        s->penalty += any_blanks ?
            RELATIVE_INDENT_WITH_BLANK_PENALTY :
            RELATIVE_INDENT_PENALTY;
    } else if (indent == m->pre_indent) {
        /*
         * The line has the same indentation level as its predecessor.
         * No additional adjustments needed.
         */
    } else {
        /*
         * The line is indented less than its predecessor. It could be
         * the block terminator of the previous block, but it could
         * also be the start of a new block (e.g., an "else" block, or
         * maybe the previous block didn't have a block terminator).
         * Try to distinguish those cases based on what comes next:
         */
        if (m->post_indent != -1 && m->post_indent > indent) {
            /*
             * The following line is indented more. So it is likely
             * that this line is the start of a block.
             */
            s->penalty += any_blanks ?
                RELATIVE_OUTDENT_WITH_BLANK_PENALTY :
                RELATIVE_OUTDENT_PENALTY;
        } else {
            /*
             * That was probably the end of a block.
             */
            s->penalty += any_blanks ?
                RELATIVE_DEDENT_WITH_BLANK_PENALTY :
                RELATIVE_DEDENT_PENALTY;
        }
    }
}

static int score_cmp(struct split_score *s1, struct split_score *s2)
{
    /* -1 if s1->effective_indent < s2->effective_indent, etc. */
    int cmp_indents = ((s1->effective_indent > s2->effective_indent) -
                       (s1->effective_indent < s2->effective_indent));

    return INDENT_WEIGHT * cmp_indents + (s1->penalty - s2->penalty);
}
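/*
 * Editor's note (illustrative, not in the original source): the indent
 * comparison is reduced to -1/0/+1 before weighting, so with INDENT_WEIGHT
 * 60 any difference in effective indent outweighs penalty differences
 * smaller than 60. E.g. s1 = {effective_indent 4, penalty 50} versus
 * s2 = {effective_indent 5, penalty 0} gives 60 * (-1) + 50 = -10 < 0,
 * so s1 still compares as the better (lower-scoring) split.
 */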

/*
 * Represent a group of changed lines in an xdfile_t (i.e., a contiguous
 * group of lines that was inserted or deleted from the corresponding version
 * of the file). We consider there to be such a group at the beginning of the
 * file, at the end of the file, and between any two unchanged lines, though
 * most such groups will usually be empty.
 *
 * If the first line in a group is equal to the line following the group,
 * then the group can be slid down. Similarly, if the last line in a group is
 * equal to the line preceding the group, then the group can be slid up. See
 * group_slide_down() and group_slide_up().
 *
 * Note that loops that are testing for changed lines in xdf->rchg do not
 * need index bounding since the array is prepared with a zero at position
 * -1 and N.
 */
struct xdlgroup {
    /*
     * The index of the first changed line in the group, or the index of
     * the unchanged line above which the (empty) group is located.
     */
    int64_t start;

    /*
     * The index of the first unchanged line after the group. For an empty
     * group, end is equal to start.
     */
    int64_t end;
};
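/*
 * Editor's note, a worked example: with nrec = 5 and rchg = {0, 1, 1, 0, 0},
 * group_init() below yields the empty group {start = 0, end = 0}, and
 * group_next() then yields {start = 1, end = 3}: line 1 is the first changed
 * line and line 3 the first unchanged line after the group.
 */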
716
716
717 /*
717 /*
718 * Initialize g to point at the first group in xdf.
718 * Initialize g to point at the first group in xdf.
719 */
719 */
720 static void group_init(xdfile_t *xdf, struct xdlgroup *g)
720 static void group_init(xdfile_t *xdf, struct xdlgroup *g)
721 {
721 {
722 g->start = g->end = 0;
722 g->start = g->end = 0;
723 while (xdf->rchg[g->end])
723 while (xdf->rchg[g->end])
724 g->end++;
724 g->end++;
725 }
725 }
726
726
727 /*
727 /*
728 * Move g to describe the next (possibly empty) group in xdf and return 0. If g
728 * Move g to describe the next (possibly empty) group in xdf and return 0. If g
729 * is already at the end of the file, do nothing and return -1.
729 * is already at the end of the file, do nothing and return -1.
730 */
730 */
731 static inline int group_next(xdfile_t *xdf, struct xdlgroup *g)
731 static inline int group_next(xdfile_t *xdf, struct xdlgroup *g)
732 {
732 {
733 if (g->end == xdf->nrec)
733 if (g->end == xdf->nrec)
734 return -1;
734 return -1;
735
735
736 g->start = g->end + 1;
736 g->start = g->end + 1;
737 for (g->end = g->start; xdf->rchg[g->end]; g->end++)
737 for (g->end = g->start; xdf->rchg[g->end]; g->end++)
738 ;
738 ;
739
739
740 return 0;
740 return 0;
741 }
741 }
742
742
743 /*
743 /*
744 * Move g to describe the previous (possibly empty) group in xdf and return 0.
744 * Move g to describe the previous (possibly empty) group in xdf and return 0.
745 * If g is already at the beginning of the file, do nothing and return -1.
745 * If g is already at the beginning of the file, do nothing and return -1.
746 */
746 */
747 static inline int group_previous(xdfile_t *xdf, struct xdlgroup *g)
747 static inline int group_previous(xdfile_t *xdf, struct xdlgroup *g)
748 {
748 {
749 if (g->start == 0)
749 if (g->start == 0)
750 return -1;
750 return -1;
751
751
752 g->end = g->start - 1;
752 g->end = g->start - 1;
753 for (g->start = g->end; xdf->rchg[g->start - 1]; g->start--)
753 for (g->start = g->end; xdf->rchg[g->start - 1]; g->start--)
754 ;
754 ;
755
755
756 return 0;
756 return 0;
757 }
757 }
758
758
759 /*
759 /*
760 * If g can be slid toward the end of the file, do so, and if it bumps into a
760 * If g can be slid toward the end of the file, do so, and if it bumps into a
761 * following group, expand this group to include it. Return 0 on success or -1
761 * following group, expand this group to include it. Return 0 on success or -1
762 * if g cannot be slid down.
762 * if g cannot be slid down.
763 */
763 */
764 static int group_slide_down(xdfile_t *xdf, struct xdlgroup *g)
764 static int group_slide_down(xdfile_t *xdf, struct xdlgroup *g)
765 {
765 {
766 if (g->end < xdf->nrec &&
766 if (g->end < xdf->nrec &&
767 recs_match(xdf->recs[g->start], xdf->recs[g->end])) {
767 recs_match(xdf->recs[g->start], xdf->recs[g->end])) {
768 xdf->rchg[g->start++] = 0;
768 xdf->rchg[g->start++] = 0;
769 xdf->rchg[g->end++] = 1;
769 xdf->rchg[g->end++] = 1;
770
770
771 while (xdf->rchg[g->end])
771 while (xdf->rchg[g->end])
772 g->end++;
772 g->end++;
773
773
774 return 0;
774 return 0;
775 } else {
775 } else {
776 return -1;
776 return -1;
777 }
777 }
778 }
778 }
779
779
780 /*
780 /*
781 * If g can be slid toward the beginning of the file, do so, and if it bumps
781 * If g can be slid toward the beginning of the file, do so, and if it bumps
782 * into a previous group, expand this group to include it. Return 0 on success
782 * into a previous group, expand this group to include it. Return 0 on success
783 * or -1 if g cannot be slid up.
783 * or -1 if g cannot be slid up.
784 */
784 */
785 static int group_slide_up(xdfile_t *xdf, struct xdlgroup *g)
785 static int group_slide_up(xdfile_t *xdf, struct xdlgroup *g)
786 {
786 {
787 if (g->start > 0 &&
787 if (g->start > 0 &&
788 recs_match(xdf->recs[g->start - 1], xdf->recs[g->end - 1])) {
788 recs_match(xdf->recs[g->start - 1], xdf->recs[g->end - 1])) {
789 xdf->rchg[--g->start] = 1;
789 xdf->rchg[--g->start] = 1;
790 xdf->rchg[--g->end] = 0;
790 xdf->rchg[--g->end] = 0;
791
791
792 while (xdf->rchg[g->start - 1])
792 while (xdf->rchg[g->start - 1])
793 g->start--;
793 g->start--;
794
794
795 return 0;
795 return 0;
796 } else {
796 } else {
797 return -1;
797 return -1;
798 }
798 }
799 }
799 }
800
800
801 static void xdl_bug(const char *msg)
801 static void xdl_bug(const char *msg)
802 {
802 {
803 fprintf(stderr, "BUG: %s\n", msg);
803 fprintf(stderr, "BUG: %s\n", msg);
804 exit(1);
804 exit(1);
805 }
805 }
806
806
807 /*
807 /*
808 * For indentation heuristic, skip searching for better slide position after
808 * For indentation heuristic, skip searching for better slide position after
809 * checking MAX_BORING lines without finding an improvement. This defends the
809 * checking MAX_BORING lines without finding an improvement. This defends the
810 * indentation heuristic logic against pathological cases. The value is not
810 * indentation heuristic logic against pathological cases. The value is not
811 * picked scientifically but should be good enough.
811 * picked scientifically but should be good enough.
812 */
812 */
813 #define MAX_BORING 100
813 #define MAX_BORING 100
814
814
815 /*
815 /*
816 * Move back and forward change groups for a consistent and pretty diff output.
816 * Move back and forward change groups for a consistent and pretty diff output.
817 * This also helps in finding joinable change groups and reducing the diff
817 * This also helps in finding joinable change groups and reducing the diff
818 * size.
818 * size.
819 */
819 */
820 int xdl_change_compact(xdfile_t *xdf, xdfile_t *xdfo, int64_t flags) {
820 int xdl_change_compact(xdfile_t *xdf, xdfile_t *xdfo, int64_t flags) {
821 struct xdlgroup g, go;
821 struct xdlgroup g, go;
822 int64_t earliest_end, end_matching_other;
822 int64_t earliest_end, end_matching_other;
823 int64_t groupsize;
823 int64_t groupsize;
824
824
825 group_init(xdf, &g);
825 group_init(xdf, &g);
826 group_init(xdfo, &go);
826 group_init(xdfo, &go);
827
827
828 while (1) {
828 while (1) {
829 /* If the group is empty in the to-be-compacted file, skip it: */
829 /* If the group is empty in the to-be-compacted file, skip it: */
830 if (g.end == g.start)
830 if (g.end == g.start)
831 goto next;
831 goto next;
832
832
833 /*
833 /*
834 * Now shift the change up and then down as far as possible in
834 * Now shift the change up and then down as far as possible in
835 * each direction. If it bumps into any other changes, merge them.
835 * each direction. If it bumps into any other changes, merge them.
836 */
836 */
837 do {
837 do {
838 groupsize = g.end - g.start;
838 groupsize = g.end - g.start;
839
839
840 /*
840 /*
841 * Keep track of the last "end" index that causes this
841 * Keep track of the last "end" index that causes this
842 * group to align with a group of changed lines in the
842 * group to align with a group of changed lines in the
843 * other file. -1 indicates that we haven't found such
843 * other file. -1 indicates that we haven't found such
844 * a match yet:
844 * a match yet:
845 */
845 */
846 end_matching_other = -1;
846 end_matching_other = -1;
847
847
848 /* Shift the group backward as much as possible: */
848 /* Shift the group backward as much as possible: */
849 while (!group_slide_up(xdf, &g))
849 while (!group_slide_up(xdf, &g))
850 if (group_previous(xdfo, &go))
850 if (group_previous(xdfo, &go))
851 xdl_bug("group sync broken sliding up");
851 xdl_bug("group sync broken sliding up");
852
852
853 /*
853 /*
854 * This is this highest that this group can be shifted.
854 * This is this highest that this group can be shifted.
855 * Record its end index:
855 * Record its end index:
856 */
856 */
857 earliest_end = g.end;
857 earliest_end = g.end;
858
858
859 if (go.end > go.start)
859 if (go.end > go.start)
860 end_matching_other = g.end;
860 end_matching_other = g.end;
861
861
862 /* Now shift the group forward as far as possible: */
862 /* Now shift the group forward as far as possible: */
863 while (1) {
863 while (1) {
864 if (group_slide_down(xdf, &g))
864 if (group_slide_down(xdf, &g))
865 break;
865 break;
866 if (group_next(xdfo, &go))
866 if (group_next(xdfo, &go))
867 xdl_bug("group sync broken sliding down");
867 xdl_bug("group sync broken sliding down");
868
868
869 if (go.end > go.start)
869 if (go.end > go.start)
870 end_matching_other = g.end;
870 end_matching_other = g.end;
871 }
871 }
872 } while (groupsize != g.end - g.start);
872 } while (groupsize != g.end - g.start);
873
873
874 /*
874 /*
875 * If the group can be shifted, then we can possibly use this
875 * If the group can be shifted, then we can possibly use this
876 * freedom to produce a more intuitive diff.
876 * freedom to produce a more intuitive diff.
877 *
877 *
878 * The group is currently shifted as far down as possible, so the
878 * The group is currently shifted as far down as possible, so the
879 * heuristics below only have to handle upwards shifts.
879 * heuristics below only have to handle upwards shifts.
880 */
880 */
881
881
882 if (g.end == earliest_end) {
882 if (g.end == earliest_end) {
883 /* no shifting was possible */
883 /* no shifting was possible */
884 } else if (end_matching_other != -1) {
884 } else if (end_matching_other != -1) {
885 /*
885 /*
886 * Move the possibly merged group of changes back to line
886 * Move the possibly merged group of changes back to line
887 * up with the last group of changes from the other file
887 * up with the last group of changes from the other file
888 * that it can align with.
888 * that it can align with.
889 */
889 */
890 while (go.end == go.start) {
890 while (go.end == go.start) {
891 if (group_slide_up(xdf, &g))
891 if (group_slide_up(xdf, &g))
892 xdl_bug("match disappeared");
892 xdl_bug("match disappeared");
893 if (group_previous(xdfo, &go))
893 if (group_previous(xdfo, &go))
894 xdl_bug("group sync broken sliding to match");
894 xdl_bug("group sync broken sliding to match");
895 }
895 }
896 } else if (flags & XDF_INDENT_HEURISTIC) {
896 } else if (flags & XDF_INDENT_HEURISTIC) {
897 /*
897 /*
898 * Indent heuristic: a group of pure add/delete lines
898 * Indent heuristic: a group of pure add/delete lines
899 * implies two splits, one between the end of the "before"
899 * implies two splits, one between the end of the "before"
900 * context and the start of the group, and another between
900 * context and the start of the group, and another between
901 * the end of the group and the beginning of the "after"
901 * the end of the group and the beginning of the "after"
902 * context. Some splits are aesthetically better and some
902 * context. Some splits are aesthetically better and some
903 * are worse. We compute a badness "score" for each split,
903 * are worse. We compute a badness "score" for each split,
904 * and add the scores for the two splits to define a
904 * and add the scores for the two splits to define a
905 * "score" for each position that the group can be shifted
905 * "score" for each position that the group can be shifted
906 * to. Then we pick the shift with the lowest score.
906 * to. Then we pick the shift with the lowest score.
907 */
907 */
908 int64_t shift, best_shift = -1;
908 int64_t shift, best_shift = -1;
909 struct split_score best_score;
909 struct split_score best_score;
910
910
911 /*
911 /*
912 * This is O(N * MAX_BLANKS) (N = shift-able lines).
912 * This is O(N * MAX_BLANKS) (N = shift-able lines).
913 * Even with MAX_BLANKS bounded to a small value, a
913 * Even with MAX_BLANKS bounded to a small value, a
914 * large N could still make this loop take several
914 * large N could still make this loop take several
915 * times longer than the main diff algorithm. The
915 * times longer than the main diff algorithm. The
916 * "boring" value is to help cut down N to something
916 * "boring" value is to help cut down N to something
917 * like (MAX_BORING + groupsize).
917 * like (MAX_BORING + groupsize).
918 *
918 *
919 * Scan from bottom to top. So we can exit the loop
919 * Scan from bottom to top. So we can exit the loop
920 * without compromising the assumption "for a same best
920 * without compromising the assumption "for a same best
921 * score, pick the bottommost shift".
921 * score, pick the bottommost shift".
922 */
922 */
923 int boring = 0;
923 int boring = 0;
924 for (shift = g.end; shift >= earliest_end; shift--) {
924 for (shift = g.end; shift >= earliest_end; shift--) {
925 struct split_measurement m;
925 struct split_measurement m;
926 struct split_score score = {0, 0};
926 struct split_score score = {0, 0};
927 int cmp;
927 int cmp;
928
928
929 measure_split(xdf, shift, &m);
929 measure_split(xdf, shift, &m);
930 score_add_split(&m, &score);
930 score_add_split(&m, &score);
931 measure_split(xdf, shift - groupsize, &m);
931 measure_split(xdf, shift - groupsize, &m);
932 score_add_split(&m, &score);
932 score_add_split(&m, &score);
933
933
934 if (best_shift == -1) {
934 if (best_shift == -1) {
935 cmp = -1;
935 cmp = -1;
936 } else {
936 } else {
937 cmp = score_cmp(&score, &best_score);
937 cmp = score_cmp(&score, &best_score);
938 }
938 }
939 if (cmp < 0) {
939 if (cmp < 0) {
940 boring = 0;
940 boring = 0;
941 best_score.effective_indent = score.effective_indent;
941 best_score.effective_indent = score.effective_indent;
942 best_score.penalty = score.penalty;
942 best_score.penalty = score.penalty;
943 best_shift = shift;
943 best_shift = shift;
944 } else {
944 } else {
945 boring += 1;
945 boring += 1;
946 if (boring >= MAX_BORING)
946 if (boring >= MAX_BORING)
947 break;
947 break;
948 }
948 }
949 }
949 }
950
950
951 while (g.end > best_shift) {
951 while (g.end > best_shift) {
952 if (group_slide_up(xdf, &g))
952 if (group_slide_up(xdf, &g))
953 xdl_bug("best shift unreached");
953 xdl_bug("best shift unreached");
954 if (group_previous(xdfo, &go))
954 if (group_previous(xdfo, &go))
955 xdl_bug("group sync broken sliding to blank line");
955 xdl_bug("group sync broken sliding to blank line");
956 }
956 }
957 }
957 }
958
958
959 next:
959 next:
960 /* Move past the just-processed group: */
960 /* Move past the just-processed group: */
961 if (group_next(xdf, &g))
961 if (group_next(xdf, &g))
962 break;
962 break;
963 if (group_next(xdfo, &go))
963 if (group_next(xdfo, &go))
964 xdl_bug("group sync broken moving to next group");
964 xdl_bug("group sync broken moving to next group");
965 }
965 }
966
966
967 if (!group_next(xdfo, &go))
967 if (!group_next(xdfo, &go))
968 xdl_bug("group sync broken at end of file");
968 xdl_bug("group sync broken at end of file");
969
969
970 return 0;
970 return 0;
971 }
971 }
972
972
973
973
974 int xdl_build_script(xdfenv_t *xe, xdchange_t **xscr) {
974 int xdl_build_script(xdfenv_t *xe, xdchange_t **xscr) {
975 xdchange_t *cscr = NULL, *xch;
975 xdchange_t *cscr = NULL, *xch;
976 char *rchg1 = xe->xdf1.rchg, *rchg2 = xe->xdf2.rchg;
976 char *rchg1 = xe->xdf1.rchg, *rchg2 = xe->xdf2.rchg;
977 int64_t i1, i2, l1, l2;
977 int64_t i1, i2, l1, l2;
978
978
979 /*
979 /*
980 * Trivial. Collects "groups" of changes and creates an edit script.
980 * Trivial. Collects "groups" of changes and creates an edit script.
981 */
981 */
982 for (i1 = xe->xdf1.nrec, i2 = xe->xdf2.nrec; i1 >= 0 || i2 >= 0; i1--, i2--)
982 for (i1 = xe->xdf1.nrec, i2 = xe->xdf2.nrec; i1 >= 0 || i2 >= 0; i1--, i2--)
983 if (rchg1[i1 - 1] || rchg2[i2 - 1]) {
983 if (rchg1[i1 - 1] || rchg2[i2 - 1]) {
984 for (l1 = i1; rchg1[i1 - 1]; i1--);
984 for (l1 = i1; rchg1[i1 - 1]; i1--);
985 for (l2 = i2; rchg2[i2 - 1]; i2--);
985 for (l2 = i2; rchg2[i2 - 1]; i2--);
986
986
987 if (!(xch = xdl_add_change(cscr, i1, i2, l1 - i1, l2 - i2))) {
987 if (!(xch = xdl_add_change(cscr, i1, i2, l1 - i1, l2 - i2))) {
988 xdl_free_script(cscr);
988 xdl_free_script(cscr);
989 return -1;
989 return -1;
990 }
990 }
991 cscr = xch;
991 cscr = xch;
992 }
992 }
993
993
994 *xscr = cscr;
994 *xscr = cscr;
995
995
996 return 0;
996 return 0;
997 }
997 }
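/*
 * Illustrative sketch (editor's addition): xdl_build_script walks the
 * change-flag arrays backwards and emits one record per run of flagged
 * lines.  The same walk, made standalone and bounds-checked (the real
 * rchg arrays carry guard slots, which is why rchg[i - 1] is safe at
 * i == 0), assuming <stdio.h>:
 */
static void collect_groups(const char *rchg1, long n1,
			   const char *rchg2, long n2)
{
	long i1, i2, l1, l2;

	for (i1 = n1, i2 = n2; i1 >= 0 || i2 >= 0; i1--, i2--) {
		if ((i1 > 0 && rchg1[i1 - 1]) || (i2 > 0 && rchg2[i2 - 1])) {
			for (l1 = i1; i1 > 0 && rchg1[i1 - 1]; i1--)
				;
			for (l2 = i2; i2 > 0 && rchg2[i2 - 1]; i2--)
				;
			/* changed span: file1 [i1, l1), file2 [i2, l2) */
			printf("-%ld,%ld +%ld,%ld\n",
			       i1, l1 - i1, i2, l2 - i2);
		}
	}
}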
998
998
999
999
1000 void xdl_free_script(xdchange_t *xscr) {
1000 void xdl_free_script(xdchange_t *xscr) {
1001 xdchange_t *xch;
1001 xdchange_t *xch;
1002
1002
1003 while ((xch = xscr) != NULL) {
1003 while ((xch = xscr) != NULL) {
1004 xscr = xscr->next;
1004 xscr = xscr->next;
1005 xdl_free(xch);
1005 xdl_free(xch);
1006 }
1006 }
1007 }
1007 }
1008
1008
1009
1009
1010 /*
1010 /*
1011 * Starting at the passed change atom, find the latest change atom to be included
1011 * Starting at the passed change atom, find the latest change atom to be included
1012 * inside the differential hunk; this copy merges only touching changes.
1012 * inside the differential hunk; this copy merges only touching changes.
1013 * Also advance xscr if the first changes must be discarded.
1013 * Also advance xscr if the first changes must be discarded.
1014 */
1014 */
1015 xdchange_t *xdl_get_hunk(xdchange_t **xscr)
1015 xdchange_t *xdl_get_hunk(xdchange_t **xscr)
1016 {
1016 {
1017 xdchange_t *xch, *xchp, *lxch;
1017 xdchange_t *xch, *xchp, *lxch;
1018 int64_t max_common = 0;
1019 int64_t max_ignorable = 0;
1020 uint64_t ignored = 0; /* number of ignored blank lines */
1018 uint64_t ignored = 0; /* number of ignored blank lines */
1021
1019
1022 /* remove ignorable changes that are too far before other changes */
1020 /* remove ignorable changes that are too far before other changes */
1023 for (xchp = *xscr; xchp && xchp->ignore; xchp = xchp->next) {
1021 for (xchp = *xscr; xchp && xchp->ignore; xchp = xchp->next) {
1024 xch = xchp->next;
1022 xch = xchp->next;
1025
1023
1026 if (xch == NULL ||
1024 if (xch == NULL ||
1027 xch->i1 - (xchp->i1 + xchp->chg1) >= max_ignorable)
1025 xch->i1 - (xchp->i1 + xchp->chg1) >= 0)
1028 *xscr = xch;
1026 *xscr = xch;
1029 }
1027 }
1030
1028
1031 if (*xscr == NULL)
1029 if (*xscr == NULL)
1032 return NULL;
1030 return NULL;
1033
1031
1034 lxch = *xscr;
1032 lxch = *xscr;
1035
1033
1036 for (xchp = *xscr, xch = xchp->next; xch; xchp = xch, xch = xch->next) {
1034 for (xchp = *xscr, xch = xchp->next; xch; xchp = xch, xch = xch->next) {
1037 int64_t distance = xch->i1 - (xchp->i1 + xchp->chg1);
1035 int64_t distance = xch->i1 - (xchp->i1 + xchp->chg1);
1038 if (distance > max_common)
1036 if (distance > 0)
1039 break;
1037 break;
1040
1038
1041 if (distance < max_ignorable && (!xch->ignore || lxch == xchp)) {
1039 if (distance < 0 && (!xch->ignore || lxch == xchp)) {
1042 lxch = xch;
1040 lxch = xch;
1043 ignored = 0;
1041 ignored = 0;
1044 } else if (distance < max_ignorable && xch->ignore) {
1042 } else if (distance < 0 && xch->ignore) {
1045 ignored += xch->chg2;
1043 ignored += xch->chg2;
1046 } else if (lxch != xchp &&
1044 } else if (lxch != xchp &&
1047 xch->i1 + ignored - (lxch->i1 + lxch->chg1) > max_common) {
1045 xch->i1 + ignored - (lxch->i1 + lxch->chg1) > 0) {
1048 break;
1046 break;
1049 } else if (!xch->ignore) {
1047 } else if (!xch->ignore) {
1050 lxch = xch;
1048 lxch = xch;
1051 ignored = 0;
1049 ignored = 0;
1052 } else {
1050 } else {
1053 ignored += xch->chg2;
1051 ignored += xch->chg2;
1054 }
1052 }
1055 }
1053 }
1056
1054
1057 return lxch;
1055 return lxch;
1058 }
1056 }
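/*
 * Worked example (editor's addition): since this copy compares the
 * inter-change distance against 0 (the max_common/max_ignorable knobs
 * were removed above), a hunk is a maximal run of changes separated by
 * zero unchanged records.  Given three non-ignorable changes
 *
 *	A: i1 = 3, chg1 = 2	covers records 3-4
 *	B: i1 = 5, chg1 = 1	distance 5 - (3 + 2) = 0, coalesces
 *	C: i1 = 9, chg1 = 1	distance 9 - (5 + 1) = 3 > 0, breaks
 *
 * xdl_get_hunk(&xscr) leaves *xscr at A and returns B, so the first
 * hunk spans A..B and the next call starts from C.  Leading ignorable
 * (blank-line) changes with nothing overlapping behind them are
 * discarded by advancing *xscr first.
 */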
1059
1057
1060
1058
1061 static int xdl_call_hunk_func(xdfenv_t *xe, xdchange_t *xscr, xdemitcb_t *ecb,
1059 static int xdl_call_hunk_func(xdfenv_t *xe, xdchange_t *xscr, xdemitcb_t *ecb,
1062 xdemitconf_t const *xecfg)
1060 xdemitconf_t const *xecfg)
1063 {
1061 {
1064 int64_t p = xe->nprefix, s = xe->nsuffix;
1062 int64_t p = xe->nprefix, s = xe->nsuffix;
1065 xdchange_t *xch, *xche;
1063 xdchange_t *xch, *xche;
1066
1064
1067 if (!xecfg->hunk_func)
1065 if (!xecfg->hunk_func)
1068 return -1;
1066 return -1;
1069
1067
1070 if ((xecfg->flags & XDL_EMIT_BDIFFHUNK) != 0) {
1068 if ((xecfg->flags & XDL_EMIT_BDIFFHUNK) != 0) {
1071 int64_t i1 = 0, i2 = 0, n1 = xe->xdf1.nrec, n2 = xe->xdf2.nrec;
1069 int64_t i1 = 0, i2 = 0, n1 = xe->xdf1.nrec, n2 = xe->xdf2.nrec;
1072 for (xch = xscr; xch; xch = xche->next) {
1070 for (xch = xscr; xch; xch = xche->next) {
1073 xche = xdl_get_hunk(&xch);
1071 xche = xdl_get_hunk(&xch);
1074 if (!xch)
1072 if (!xch)
1075 break;
1073 break;
1076 if (xch != xche)
1074 if (xch != xche)
1077 xdl_bug("xch != xche");
1075 xdl_bug("xch != xche");
1078 xch->i1 += p;
1076 xch->i1 += p;
1079 xch->i2 += p;
1077 xch->i2 += p;
1080 if (xch->i1 > i1 || xch->i2 > i2) {
1078 if (xch->i1 > i1 || xch->i2 > i2) {
1081 if (xecfg->hunk_func(i1, xch->i1, i2, xch->i2, ecb->priv) < 0)
1079 if (xecfg->hunk_func(i1, xch->i1, i2, xch->i2, ecb->priv) < 0)
1082 return -1;
1080 return -1;
1083 }
1081 }
1084 i1 = xche->i1 + xche->chg1;
1082 i1 = xche->i1 + xche->chg1;
1085 i2 = xche->i2 + xche->chg2;
1083 i2 = xche->i2 + xche->chg2;
1086 }
1084 }
1087 if (xecfg->hunk_func(i1, n1 + p + s, i2, n2 + p + s,
1085 if (xecfg->hunk_func(i1, n1 + p + s, i2, n2 + p + s,
1088 ecb->priv) < 0)
1086 ecb->priv) < 0)
1089 return -1;
1087 return -1;
1090 } else {
1088 } else {
1091 for (xch = xscr; xch; xch = xche->next) {
1089 for (xch = xscr; xch; xch = xche->next) {
1092 xche = xdl_get_hunk(&xch);
1090 xche = xdl_get_hunk(&xch);
1093 if (!xch)
1091 if (!xch)
1094 break;
1092 break;
1095 if (xecfg->hunk_func(xch->i1 + p,
1093 if (xecfg->hunk_func(xch->i1 + p,
1096 xche->i1 + xche->chg1 - xch->i1,
1094 xche->i1 + xche->chg1 - xch->i1,
1097 xch->i2 + p,
1095 xch->i2 + p,
1098 xche->i2 + xche->chg2 - xch->i2,
1096 xche->i2 + xche->chg2 - xch->i2,
1099 ecb->priv) < 0)
1097 ecb->priv) < 0)
1100 return -1;
1098 return -1;
1101 }
1099 }
1102 }
1100 }
1103 return 0;
1101 return 0;
1104 }
1102 }
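/*
 * Illustrative callback sketch (editor's addition; the exact typedef
 * lives in the xdiff header, and the shape below is inferred from the
 * call sites above).  Without XDL_EMIT_BDIFFHUNK the four values are
 * (start1, len1, start2, len2) of a changed hunk; with it they are the
 * bounds of the matching blocks between changes.  Assumes <stdio.h>.
 */
static int print_hunk(int64_t a1, int64_t a2, int64_t b1, int64_t b2,
		      void *priv)
{
	(void)priv;	/* ecb->priv, unused in this sketch */
	printf("@@ -%lld,%lld +%lld,%lld @@\n", (long long)a1,
	       (long long)a2, (long long)b1, (long long)b2);
	return 0;	/* a negative return aborts xdl_call_hunk_func */
}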
1105
1103
1106 int xdl_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
1104 int xdl_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
1107 xdemitconf_t const *xecfg, xdemitcb_t *ecb) {
1105 xdemitconf_t const *xecfg, xdemitcb_t *ecb) {
1108 xdchange_t *xscr;
1106 xdchange_t *xscr;
1109 xdfenv_t xe;
1107 xdfenv_t xe;
1110
1108
1111 if (xdl_do_diff(mf1, mf2, xpp, &xe) < 0) {
1109 if (xdl_do_diff(mf1, mf2, xpp, &xe) < 0) {
1112
1110
1113 return -1;
1111 return -1;
1114 }
1112 }
1115 if (xdl_change_compact(&xe.xdf1, &xe.xdf2, xpp->flags) < 0 ||
1113 if (xdl_change_compact(&xe.xdf1, &xe.xdf2, xpp->flags) < 0 ||
1116 xdl_change_compact(&xe.xdf2, &xe.xdf1, xpp->flags) < 0 ||
1114 xdl_change_compact(&xe.xdf2, &xe.xdf1, xpp->flags) < 0 ||
1117 xdl_build_script(&xe, &xscr) < 0) {
1115 xdl_build_script(&xe, &xscr) < 0) {
1118
1116
1119 xdl_free_env(&xe);
1117 xdl_free_env(&xe);
1120 return -1;
1118 return -1;
1121 }
1119 }
1122
1120
1123 if (xdl_call_hunk_func(&xe, xscr, ecb, xecfg) < 0) {
1121 if (xdl_call_hunk_func(&xe, xscr, ecb, xecfg) < 0) {
1124 xdl_free_script(xscr);
1122 xdl_free_script(xscr);
1125 xdl_free_env(&xe);
1123 xdl_free_env(&xe);
1126 return -1;
1124 return -1;
1127 }
1125 }
1128 xdl_free_script(xscr);
1126 xdl_free_script(xscr);
1129 xdl_free_env(&xe);
1127 xdl_free_env(&xe);
1130
1128
1131 return 0;
1129 return 0;
1132 }
1130 }
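/*
 * Usage sketch (editor's addition; mmfile_t's { ptr, size } layout is
 * assumed from the standard xdiff header and may differ slightly in
 * this vendored copy): diff two in-memory buffers and stream each hunk
 * through print_hunk from the sketch above.  Assumes <string.h>.
 */
static int diff_buffers(char *a, int64_t na, char *b, int64_t nb)
{
	mmfile_t mf1, mf2;
	xpparam_t xpp;
	xdemitconf_t xecfg;
	xdemitcb_t ecb;

	memset(&xpp, 0, sizeof(xpp));
	memset(&xecfg, 0, sizeof(xecfg));
	memset(&ecb, 0, sizeof(ecb));

	mf1.ptr = a; mf1.size = na;
	mf2.ptr = b; mf2.size = nb;
	xecfg.hunk_func = print_hunk;	/* called once per hunk */

	return xdl_diff(&mf1, &mf2, &xpp, &xecfg, &ecb);
}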