xdiff: add a preprocessing step that trims files...
Jun Wu
r36838:f33a87cf default
@@ -1,1126 +1,1133 @@
/*
 * LibXDiff by Davide Libenzi ( File Differential Library )
 * Copyright (C) 2003 Davide Libenzi
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Davide Libenzi <davidel@xmailserver.org>
 *
 */

#include "xinclude.h"



#define XDL_MAX_COST_MIN 256
#define XDL_HEUR_MIN_COST 256
#define XDL_LINE_MAX (long)((1UL << (CHAR_BIT * sizeof(long) - 1)) - 1)
#define XDL_SNAKE_CNT 20
#define XDL_K_HEUR 4

/* VC 2008 doesn't know about the inline keyword. */
#if defined(_MSC_VER)
#define inline __forceinline
#endif


typedef struct s_xdpsplit {
	long i1, i2;
	int min_lo, min_hi;
} xdpsplit_t;




static long xdl_split(unsigned long const *ha1, long off1, long lim1,
		      unsigned long const *ha2, long off2, long lim2,
		      long *kvdf, long *kvdb, int need_min, xdpsplit_t *spl,
		      xdalgoenv_t *xenv);
static xdchange_t *xdl_add_change(xdchange_t *xscr, long i1, long i2, long chg1, long chg2);




/*
 * See "An O(ND) Difference Algorithm and its Variations", by Eugene Myers.
 * Basically it considers a "box" (off1, off2, lim1, lim2) and scans from both
 * the forward diagonal starting at (off1, off2) and the backward diagonal
 * starting at (lim1, lim2). If the K values on the same diagonal cross, it
 * returns the furthest point of reach. Running this algorithm in full can be
 * too expensive in some cases, so a little bit of heuristic is needed to cut
 * the search and to return a suboptimal point.
 */
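/*
 * Note on the k-vectors: kvdf[d] records the furthest-reaching index into ha1
 * on forward diagonal d = i1 - i2, and kvdb[d] the analogous index for the
 * backward search. Both are indexed directly by the diagonal number d, which
 * may be negative (see the biasing in xdl_do_diff() below).
 */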
static long xdl_split(unsigned long const *ha1, long off1, long lim1,
		      unsigned long const *ha2, long off2, long lim2,
		      long *kvdf, long *kvdb, int need_min, xdpsplit_t *spl,
		      xdalgoenv_t *xenv) {
	long dmin = off1 - lim2, dmax = lim1 - off2;
	long fmid = off1 - off2, bmid = lim1 - lim2;
	long odd = (fmid - bmid) & 1;
	long fmin = fmid, fmax = fmid;
	long bmin = bmid, bmax = bmid;
	long ec, d, i1, i2, prev1, best, dd, v, k;

	/*
	 * Set initial diagonal values for both forward and backward path.
	 */
	kvdf[fmid] = off1;
	kvdb[bmid] = lim1;

	for (ec = 1;; ec++) {
		int got_snake = 0;

		/*
		 * We need to extend the diagonal "domain" by one. If the next
		 * value exits the box boundaries we need to change it in the
		 * opposite direction because (max - min) must be a power of two.
		 * Also we initialize the external K value to -1 so that we can
		 * avoid extra condition checks inside the core loop.
		 */
		if (fmin > dmin)
			kvdf[--fmin - 1] = -1;
		else
			++fmin;
		if (fmax < dmax)
			kvdf[++fmax + 1] = -1;
		else
			--fmax;

		for (d = fmax; d >= fmin; d -= 2) {
			if (kvdf[d - 1] >= kvdf[d + 1])
				i1 = kvdf[d - 1] + 1;
			else
				i1 = kvdf[d + 1];
			prev1 = i1;
			i2 = i1 - d;
			for (; i1 < lim1 && i2 < lim2 && ha1[i1] == ha2[i2]; i1++, i2++);
			if (i1 - prev1 > xenv->snake_cnt)
				got_snake = 1;
			kvdf[d] = i1;
			if (odd && bmin <= d && d <= bmax && kvdb[d] <= i1) {
				spl->i1 = i1;
				spl->i2 = i2;
				spl->min_lo = spl->min_hi = 1;
				return ec;
			}
		}

		/*
		 * We need to extend the diagonal "domain" by one. If the next
		 * value exits the box boundaries we need to change it in the
		 * opposite direction because (max - min) must be a power of two.
		 * Also we initialize the external K value to -1 so that we can
		 * avoid extra condition checks inside the core loop.
		 */
		if (bmin > dmin)
			kvdb[--bmin - 1] = XDL_LINE_MAX;
		else
			++bmin;
		if (bmax < dmax)
			kvdb[++bmax + 1] = XDL_LINE_MAX;
		else
			--bmax;

		for (d = bmax; d >= bmin; d -= 2) {
			if (kvdb[d - 1] < kvdb[d + 1])
				i1 = kvdb[d - 1];
			else
				i1 = kvdb[d + 1] - 1;
			prev1 = i1;
			i2 = i1 - d;
			for (; i1 > off1 && i2 > off2 && ha1[i1 - 1] == ha2[i2 - 1]; i1--, i2--);
			if (prev1 - i1 > xenv->snake_cnt)
				got_snake = 1;
			kvdb[d] = i1;
			if (!odd && fmin <= d && d <= fmax && i1 <= kvdf[d]) {
				spl->i1 = i1;
				spl->i2 = i2;
				spl->min_lo = spl->min_hi = 1;
				return ec;
			}
		}

		if (need_min)
			continue;

		/*
		 * If the edit cost is above the heuristic trigger and if
		 * we got a good snake, we sample current diagonals to see
		 * if some of them have reached an "interesting" path. Our
		 * measure is a function of the distance from the diagonal
		 * corner (i1 + i2) penalized with the distance from the
		 * mid diagonal itself. If this value is above the current
		 * edit cost times a magic factor (XDL_K_HEUR) we consider
		 * it interesting.
		 */
		if (got_snake && ec > xenv->heur_min) {
			for (best = 0, d = fmax; d >= fmin; d -= 2) {
				dd = d > fmid ? d - fmid: fmid - d;
				i1 = kvdf[d];
				i2 = i1 - d;
				v = (i1 - off1) + (i2 - off2) - dd;

				if (v > XDL_K_HEUR * ec && v > best &&
				    off1 + xenv->snake_cnt <= i1 && i1 < lim1 &&
				    off2 + xenv->snake_cnt <= i2 && i2 < lim2) {
					for (k = 1; ha1[i1 - k] == ha2[i2 - k]; k++)
						if (k == xenv->snake_cnt) {
							best = v;
							spl->i1 = i1;
							spl->i2 = i2;
							break;
						}
				}
			}
			if (best > 0) {
				spl->min_lo = 1;
				spl->min_hi = 0;
				return ec;
			}

			for (best = 0, d = bmax; d >= bmin; d -= 2) {
				dd = d > bmid ? d - bmid: bmid - d;
				i1 = kvdb[d];
				i2 = i1 - d;
				v = (lim1 - i1) + (lim2 - i2) - dd;

				if (v > XDL_K_HEUR * ec && v > best &&
				    off1 < i1 && i1 <= lim1 - xenv->snake_cnt &&
				    off2 < i2 && i2 <= lim2 - xenv->snake_cnt) {
					for (k = 0; ha1[i1 + k] == ha2[i2 + k]; k++)
						if (k == xenv->snake_cnt - 1) {
							best = v;
							spl->i1 = i1;
							spl->i2 = i2;
							break;
						}
				}
			}
			if (best > 0) {
				spl->min_lo = 0;
				spl->min_hi = 1;
				return ec;
			}
		}

		/*
		 * Enough is enough. We spent too much time here and now we collect
		 * the furthest reaching path using the (i1 + i2) measure.
		 */
		if (ec >= xenv->mxcost) {
			long fbest, fbest1, bbest, bbest1;

			fbest = fbest1 = -1;
			for (d = fmax; d >= fmin; d -= 2) {
				i1 = XDL_MIN(kvdf[d], lim1);
				i2 = i1 - d;
				if (lim2 < i2)
					i1 = lim2 + d, i2 = lim2;
				if (fbest < i1 + i2) {
					fbest = i1 + i2;
					fbest1 = i1;
				}
			}

			bbest = bbest1 = XDL_LINE_MAX;
			for (d = bmax; d >= bmin; d -= 2) {
				i1 = XDL_MAX(off1, kvdb[d]);
				i2 = i1 - d;
				if (i2 < off2)
					i1 = off2 + d, i2 = off2;
				if (i1 + i2 < bbest) {
					bbest = i1 + i2;
					bbest1 = i1;
				}
			}

			if ((lim1 + lim2) - bbest < fbest - (off1 + off2)) {
				spl->i1 = fbest1;
				spl->i2 = fbest - fbest1;
				spl->min_lo = 1;
				spl->min_hi = 0;
			} else {
				spl->i1 = bbest1;
				spl->i2 = bbest - bbest1;
				spl->min_lo = 0;
				spl->min_hi = 1;
			}
			return ec;
		}
	}
}


/*
 * Rule: "Divide et Impera". Recursively split the box into sub-boxes by calling
 * the box splitting function. Note that the real job (marking changed lines)
 * is done in the two boundary-reaching checks.
 */
int xdl_recs_cmp(diffdata_t *dd1, long off1, long lim1,
		 diffdata_t *dd2, long off2, long lim2,
		 long *kvdf, long *kvdb, int need_min, xdalgoenv_t *xenv) {
	unsigned long const *ha1 = dd1->ha, *ha2 = dd2->ha;

	/*
	 * Shrink the box by walking through each diagonal snake (SW and NE).
	 */
	for (; off1 < lim1 && off2 < lim2 && ha1[off1] == ha2[off2]; off1++, off2++);
	for (; off1 < lim1 && off2 < lim2 && ha1[lim1 - 1] == ha2[lim2 - 1]; lim1--, lim2--);

	/*
	 * If one dimension is empty, then all records on the other one must
	 * be obviously changed.
	 */
	if (off1 == lim1) {
		char *rchg2 = dd2->rchg;
		long *rindex2 = dd2->rindex;

		for (; off2 < lim2; off2++)
			rchg2[rindex2[off2]] = 1;
	} else if (off2 == lim2) {
		char *rchg1 = dd1->rchg;
		long *rindex1 = dd1->rindex;

		for (; off1 < lim1; off1++)
			rchg1[rindex1[off1]] = 1;
	} else {
		xdpsplit_t spl;
		spl.i1 = spl.i2 = 0;

		/*
		 * Divide ...
		 */
		if (xdl_split(ha1, off1, lim1, ha2, off2, lim2, kvdf, kvdb,
			      need_min, &spl, xenv) < 0) {

			return -1;
		}

		/*
		 * ... et Impera.
		 */
		if (xdl_recs_cmp(dd1, off1, spl.i1, dd2, off2, spl.i2,
				 kvdf, kvdb, spl.min_lo, xenv) < 0 ||
		    xdl_recs_cmp(dd1, spl.i1, lim1, dd2, spl.i2, lim2,
				 kvdf, kvdb, spl.min_hi, xenv) < 0) {

			return -1;
		}
	}

	return 0;
}


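/*
 * Top-level driver for this file: prepare both files, allocate the forward
 * and backward k-vectors, and run the recursive split over the full ranges
 * of effective (nreff) records.
 */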
int xdl_do_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
		xdfenv_t *xe) {
	long ndiags;
	long *kvd, *kvdf, *kvdb;
	xdalgoenv_t xenv;
	diffdata_t dd1, dd2;

	if (xdl_prepare_env(mf1, mf2, xpp, xe) < 0) {

		return -1;
	}

	/*
	 * Allocate and setup K vectors to be used by the differential algorithm.
	 * One is to store the forward path and one to store the backward path.
	 */
	ndiags = xe->xdf1.nreff + xe->xdf2.nreff + 3;
	if (!(kvd = (long *) xdl_malloc((2 * ndiags + 2) * sizeof(long)))) {

		xdl_free_env(xe);
		return -1;
	}
	kvdf = kvd;
	kvdb = kvdf + ndiags;
	kvdf += xe->xdf2.nreff + 1;
	kvdb += xe->xdf2.nreff + 1;
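	/*
	 * The k-vectors are biased by xdf2.nreff + 1 so that they can be
	 * indexed directly by the diagonal number d = i1 - i2, which ranges
	 * from -nreff2 to +nreff1, with one spare slot on each side for the
	 * sentinel values written by xdl_split().
	 */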

	xenv.mxcost = xdl_bogosqrt(ndiags);
	if (xenv.mxcost < XDL_MAX_COST_MIN)
		xenv.mxcost = XDL_MAX_COST_MIN;
	xenv.snake_cnt = XDL_SNAKE_CNT;
	xenv.heur_min = XDL_HEUR_MIN_COST;

	dd1.nrec = xe->xdf1.nreff;
	dd1.ha = xe->xdf1.ha;
	dd1.rchg = xe->xdf1.rchg;
	dd1.rindex = xe->xdf1.rindex;
	dd2.nrec = xe->xdf2.nreff;
	dd2.ha = xe->xdf2.ha;
	dd2.rchg = xe->xdf2.rchg;
	dd2.rindex = xe->xdf2.rindex;

	if (xdl_recs_cmp(&dd1, 0, dd1.nrec, &dd2, 0, dd2.nrec,
			 kvdf, kvdb, (xpp->flags & XDF_NEED_MINIMAL) != 0, &xenv) < 0) {

		xdl_free(kvd);
		xdl_free_env(xe);
		return -1;
	}

	xdl_free(kvd);

	return 0;
}


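/*
 * Prepend a change record to the script: chg1 lines starting at i1 in the
 * first file correspond to chg2 lines starting at i2 in the second. The
 * script is built back to front by xdl_build_script() below.
 */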
static xdchange_t *xdl_add_change(xdchange_t *xscr, long i1, long i2, long chg1, long chg2) {
	xdchange_t *xch;

	if (!(xch = (xdchange_t *) xdl_malloc(sizeof(xdchange_t))))
		return NULL;

	xch->next = xscr;
	xch->i1 = i1;
	xch->i2 = i2;
	xch->chg1 = chg1;
	xch->chg2 = chg2;
	xch->ignore = 0;

	return xch;
}


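/*
 * Two records are considered equal when their hashes agree and, so that hash
 * collisions cannot produce false matches, xdl_recmatch() confirms the
 * underlying lines compare equal under the whitespace handling selected by
 * flags.
 */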
static int recs_match(xrecord_t *rec1, xrecord_t *rec2, long flags)
{
	return (rec1->ha == rec2->ha &&
		xdl_recmatch(rec1->ptr, rec1->size,
			     rec2->ptr, rec2->size,
			     flags));
}

/*
 * If a line is indented more than this, get_indent() just returns this value.
 * This avoids having to do absurd amounts of work for data that are not
 * human-readable text, and also ensures that the output of get_indent fits within
 * an int.
 */
#define MAX_INDENT 200

/*
 * Return the amount of indentation of the specified line, treating TAB as 8
 * columns. Return -1 if line is empty or contains only whitespace. Clamp the
 * output value at MAX_INDENT.
 */
static int get_indent(xrecord_t *rec)
{
	long i;
	int ret = 0;

	for (i = 0; i < rec->size; i++) {
		char c = rec->ptr[i];

		if (!XDL_ISSPACE(c))
			return ret;
		else if (c == ' ')
			ret += 1;
		else if (c == '\t')
			ret += 8 - ret % 8;
		/* ignore other whitespace characters */

		if (ret >= MAX_INDENT)
			return MAX_INDENT;
	}

	/* The line contains only whitespace. */
	return -1;
}

/*
 * If more than this number of consecutive blank rows are found, just return this
 * value. This avoids requiring O(N^2) work for pathological cases, and also
 * ensures that the output of score_split fits in an int.
 */
#define MAX_BLANKS 20

/* Characteristics measured about a hypothetical split position. */
struct split_measurement {
	/*
	 * Is the split at the end of the file (aside from any blank lines)?
	 */
	int end_of_file;

	/*
	 * How much is the line immediately following the split indented (or -1 if
	 * the line is blank):
	 */
	int indent;

	/*
	 * How many consecutive lines above the split are blank?
	 */
	int pre_blank;

	/*
	 * How much is the nearest non-blank line above the split indented (or -1
	 * if there is no such line)?
	 */
	int pre_indent;

	/*
	 * How many lines after the line following the split are blank?
	 */
	int post_blank;

	/*
	 * How much is the nearest non-blank line after the line following the
	 * split indented (or -1 if there is no such line)?
	 */
	int post_indent;
};

struct split_score {
	/* The effective indent of this split (smaller is preferred). */
	int effective_indent;

	/* Penalty for this split (smaller is preferred). */
	int penalty;
};

/*
 * Fill m with information about a hypothetical split of xdf above line split.
 */
static void measure_split(const xdfile_t *xdf, long split,
			  struct split_measurement *m)
{
	long i;

	if (split >= xdf->nrec) {
		m->end_of_file = 1;
		m->indent = -1;
	} else {
		m->end_of_file = 0;
		m->indent = get_indent(xdf->recs[split]);
	}

	m->pre_blank = 0;
	m->pre_indent = -1;
	for (i = split - 1; i >= 0; i--) {
		m->pre_indent = get_indent(xdf->recs[i]);
		if (m->pre_indent != -1)
			break;
		m->pre_blank += 1;
		if (m->pre_blank == MAX_BLANKS) {
			m->pre_indent = 0;
			break;
		}
	}

	m->post_blank = 0;
	m->post_indent = -1;
	for (i = split + 1; i < xdf->nrec; i++) {
		m->post_indent = get_indent(xdf->recs[i]);
		if (m->post_indent != -1)
			break;
		m->post_blank += 1;
		if (m->post_blank == MAX_BLANKS) {
			m->post_indent = 0;
			break;
		}
	}
}

/*
 * The empirically-determined weight factors used by score_split() below.
 * Larger values mean that the position is a less favorable place to split.
 *
 * Note that scores are only ever compared against each other, so multiplying
 * all of these weight/penalty values by the same factor wouldn't change the
 * heuristic's behavior. Still, we need to set that arbitrary scale *somehow*.
 * In practice, these numbers are chosen to be large enough that they can be
 * adjusted relative to each other with sufficient precision despite using
 * integer math.
 */

/* Penalty if there are no non-blank lines before the split */
#define START_OF_FILE_PENALTY 1

/* Penalty if there are no non-blank lines after the split */
#define END_OF_FILE_PENALTY 21

/* Multiplier for the number of blank lines around the split */
#define TOTAL_BLANK_WEIGHT (-30)

/* Multiplier for the number of blank lines after the split */
#define POST_BLANK_WEIGHT 6

/*
 * Penalties applied if the line is indented more than its predecessor
 */
#define RELATIVE_INDENT_PENALTY (-4)
#define RELATIVE_INDENT_WITH_BLANK_PENALTY 10

/*
 * Penalties applied if the line is indented less than both its predecessor and
 * its successor
 */
#define RELATIVE_OUTDENT_PENALTY 24
#define RELATIVE_OUTDENT_WITH_BLANK_PENALTY 17

/*
 * Penalties applied if the line is indented less than its predecessor but not
 * less than its successor
 */
#define RELATIVE_DEDENT_PENALTY 23
#define RELATIVE_DEDENT_WITH_BLANK_PENALTY 17

/*
 * We only consider whether the sums of the effective indents for splits are
 * less than (-1), equal to (0), or greater than (+1) one another. The resulting
 * value is multiplied by the following weight and combined with the penalty to
 * determine the better of two scores.
 */
#define INDENT_WEIGHT 60

/*
 * Compute a badness score for the hypothetical split whose measurements are
 * stored in m. The weight factors were determined empirically using the tools and
 * corpus described in
 *
 *     https://github.com/mhagger/diff-slider-tools
 *
 * Also see that project if you want to improve the weights based on, for example,
 * a larger or more diverse corpus.
 */
static void score_add_split(const struct split_measurement *m, struct split_score *s)
{
	/*
	 * A place to accumulate penalty factors (positive makes this index more
	 * favored):
	 */
	int post_blank, total_blank, indent, any_blanks;

	if (m->pre_indent == -1 && m->pre_blank == 0)
		s->penalty += START_OF_FILE_PENALTY;

	if (m->end_of_file)
		s->penalty += END_OF_FILE_PENALTY;

	/*
	 * Set post_blank to the number of blank lines following the split,
	 * including the line immediately after the split:
	 */
	post_blank = (m->indent == -1) ? 1 + m->post_blank : 0;
	total_blank = m->pre_blank + post_blank;

	/* Penalties based on nearby blank lines: */
	s->penalty += TOTAL_BLANK_WEIGHT * total_blank;
	s->penalty += POST_BLANK_WEIGHT * post_blank;

	if (m->indent != -1)
		indent = m->indent;
	else
		indent = m->post_indent;

	any_blanks = (total_blank != 0);

	/* Note that the effective indent is -1 at the end of the file: */
	s->effective_indent += indent;

	if (indent == -1) {
		/* No additional adjustments needed. */
	} else if (m->pre_indent == -1) {
		/* No additional adjustments needed. */
	} else if (indent > m->pre_indent) {
		/*
		 * The line is indented more than its predecessor.
		 */
		s->penalty += any_blanks ?
			RELATIVE_INDENT_WITH_BLANK_PENALTY :
			RELATIVE_INDENT_PENALTY;
	} else if (indent == m->pre_indent) {
		/*
		 * The line has the same indentation level as its predecessor.
		 * No additional adjustments needed.
		 */
	} else {
		/*
		 * The line is indented less than its predecessor. It could be
		 * the block terminator of the previous block, but it could
		 * also be the start of a new block (e.g., an "else" block, or
		 * maybe the previous block didn't have a block terminator).
		 * Try to distinguish those cases based on what comes next:
		 */
		if (m->post_indent != -1 && m->post_indent > indent) {
			/*
			 * The following line is indented more. So it is likely
			 * that this line is the start of a block.
			 */
			s->penalty += any_blanks ?
				RELATIVE_OUTDENT_WITH_BLANK_PENALTY :
				RELATIVE_OUTDENT_PENALTY;
		} else {
			/*
			 * That was probably the end of a block.
			 */
			s->penalty += any_blanks ?
				RELATIVE_DEDENT_WITH_BLANK_PENALTY :
				RELATIVE_DEDENT_PENALTY;
		}
	}
}

static int score_cmp(struct split_score *s1, struct split_score *s2)
{
	/* -1 if s1->effective_indent < s2->effective_indent, etc. */
	int cmp_indents = ((s1->effective_indent > s2->effective_indent) -
			   (s1->effective_indent < s2->effective_indent));

	return INDENT_WEIGHT * cmp_indents + (s1->penalty - s2->penalty);
}

/*
 * Represent a group of changed lines in an xdfile_t (i.e., a contiguous group
 * of lines that was inserted or deleted from the corresponding version of the
 * file). We consider there to be such a group at the beginning of the file, at
 * the end of the file, and between any two unchanged lines, though most such
 * groups will usually be empty.
 *
 * If the first line in a group is equal to the line following the group, then
 * the group can be slid down. Similarly, if the last line in a group is equal
 * to the line preceding the group, then the group can be slid up. See
 * group_slide_down() and group_slide_up().
 *
 * Note that loops that are testing for changed lines in xdf->rchg do not need
 * index bounding since the array is prepared with a zero at position -1 and N.
 */
struct xdlgroup {
	/*
	 * The index of the first changed line in the group, or the index of
	 * the unchanged line above which the (empty) group is located.
	 */
	long start;

	/*
	 * The index of the first unchanged line after the group. For an empty
	 * group, end is equal to start.
	 */
	long end;
};

/*
 * Initialize g to point at the first group in xdf.
 */
static void group_init(xdfile_t *xdf, struct xdlgroup *g)
{
	g->start = g->end = 0;
	while (xdf->rchg[g->end])
		g->end++;
}

/*
 * Move g to describe the next (possibly empty) group in xdf and return 0. If g
 * is already at the end of the file, do nothing and return -1.
 */
static inline int group_next(xdfile_t *xdf, struct xdlgroup *g)
{
	if (g->end == xdf->nrec)
		return -1;

	g->start = g->end + 1;
	for (g->end = g->start; xdf->rchg[g->end]; g->end++)
		;

	return 0;
}

/*
 * Move g to describe the previous (possibly empty) group in xdf and return 0.
 * If g is already at the beginning of the file, do nothing and return -1.
 */
static inline int group_previous(xdfile_t *xdf, struct xdlgroup *g)
{
	if (g->start == 0)
		return -1;

	g->end = g->start - 1;
	for (g->start = g->end; xdf->rchg[g->start - 1]; g->start--)
		;

	return 0;
}

/*
 * If g can be slid toward the end of the file, do so, and if it bumps into a
 * following group, expand this group to include it. Return 0 on success or -1
 * if g cannot be slid down.
 */
static int group_slide_down(xdfile_t *xdf, struct xdlgroup *g, long flags)
{
	if (g->end < xdf->nrec &&
	    recs_match(xdf->recs[g->start], xdf->recs[g->end], flags)) {
		xdf->rchg[g->start++] = 0;
		xdf->rchg[g->end++] = 1;

		while (xdf->rchg[g->end])
			g->end++;

		return 0;
	} else {
		return -1;
	}
}

/*
 * If g can be slid toward the beginning of the file, do so, and if it bumps
 * into a previous group, expand this group to include it. Return 0 on success
 * or -1 if g cannot be slid up.
 */
static int group_slide_up(xdfile_t *xdf, struct xdlgroup *g, long flags)
{
	if (g->start > 0 &&
	    recs_match(xdf->recs[g->start - 1], xdf->recs[g->end - 1], flags)) {
		xdf->rchg[--g->start] = 1;
		xdf->rchg[--g->end] = 0;

		while (xdf->rchg[g->start - 1])
			g->start--;

		return 0;
	} else {
		return -1;
	}
}

static void xdl_bug(const char *msg)
{
	fprintf(stderr, "BUG: %s\n", msg);
	exit(1);
}

/*
 * For the indentation heuristic, skip searching for a better slide position
 * after checking MAX_BORING lines without finding an improvement. This defends
 * the indentation heuristic logic against pathological cases. The value is not
 * picked scientifically but should be good enough.
 */
#define MAX_BORING 100

/*
 * Move back and forward change groups for a consistent and pretty diff output.
 * This also helps in finding joinable change groups and reducing the diff
 * size.
 */
int xdl_change_compact(xdfile_t *xdf, xdfile_t *xdfo, long flags) {
	struct xdlgroup g, go;
	long earliest_end, end_matching_other;
	long groupsize;

	group_init(xdf, &g);
	group_init(xdfo, &go);

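	/*
	 * g walks the groups of the file being compacted (xdf) while go walks
	 * the corresponding groups of the other file (xdfo); the two cursors
	 * are advanced in lock-step, which is the invariant the xdl_bug()
	 * calls below check.
	 */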
	while (1) {
		/* If the group is empty in the to-be-compacted file, skip it: */
		if (g.end == g.start)
			goto next;

		/*
		 * Now shift the change up and then down as far as possible in
		 * each direction. If it bumps into any other changes, merge them.
		 */
		do {
			groupsize = g.end - g.start;

			/*
			 * Keep track of the last "end" index that causes this
			 * group to align with a group of changed lines in the
			 * other file. -1 indicates that we haven't found such
			 * a match yet:
			 */
			end_matching_other = -1;

			/* Shift the group backward as much as possible: */
			while (!group_slide_up(xdf, &g, flags))
				if (group_previous(xdfo, &go))
					xdl_bug("group sync broken sliding up");

			/*
			 * This is the highest that this group can be shifted.
			 * Record its end index:
			 */
			earliest_end = g.end;

			if (go.end > go.start)
				end_matching_other = g.end;

			/* Now shift the group forward as far as possible: */
			while (1) {
				if (group_slide_down(xdf, &g, flags))
					break;
				if (group_next(xdfo, &go))
					xdl_bug("group sync broken sliding down");

				if (go.end > go.start)
					end_matching_other = g.end;
			}
		} while (groupsize != g.end - g.start);

		/*
		 * If the group can be shifted, then we can possibly use this
		 * freedom to produce a more intuitive diff.
		 *
		 * The group is currently shifted as far down as possible, so the
		 * heuristics below only have to handle upwards shifts.
		 */

		if (g.end == earliest_end) {
			/* no shifting was possible */
		} else if (end_matching_other != -1) {
			/*
			 * Move the possibly merged group of changes back to line
			 * up with the last group of changes from the other file
			 * that it can align with.
			 */
			while (go.end == go.start) {
				if (group_slide_up(xdf, &g, flags))
					xdl_bug("match disappeared");
				if (group_previous(xdfo, &go))
					xdl_bug("group sync broken sliding to match");
			}
		} else if (flags & XDF_INDENT_HEURISTIC) {
			/*
			 * Indent heuristic: a group of pure add/delete lines
			 * implies two splits, one between the end of the "before"
			 * context and the start of the group, and another between
			 * the end of the group and the beginning of the "after"
			 * context. Some splits are aesthetically better and some
			 * are worse. We compute a badness "score" for each split,
			 * and add the scores for the two splits to define a
			 * "score" for each position that the group can be shifted
			 * to. Then we pick the shift with the lowest score.
			 */
			long shift, best_shift = -1;
			struct split_score best_score;

			/*
			 * This is O(N * MAX_BLANKS) (N = shift-able lines).
			 * Even with MAX_BLANKS bounded to a small value, a
			 * large N could still make this loop take several
			 * times longer than the main diff algorithm. The
			 * "boring" value is to help cut down N to something
			 * like (MAX_BORING + groupsize).
			 *
			 * Scan from bottom to top so we can exit the loop
			 * early without compromising the assumption "for the
			 * same best score, pick the bottommost shift".
			 */
			int boring = 0;
			for (shift = g.end; shift >= earliest_end; shift--) {
				struct split_measurement m;
				struct split_score score = {0, 0};
				int cmp;

				measure_split(xdf, shift, &m);
				score_add_split(&m, &score);
				measure_split(xdf, shift - groupsize, &m);
				score_add_split(&m, &score);

				if (best_shift == -1) {
					cmp = -1;
				} else {
					cmp = score_cmp(&score, &best_score);
				}
				if (cmp < 0) {
					boring = 0;
					best_score.effective_indent = score.effective_indent;
					best_score.penalty = score.penalty;
					best_shift = shift;
				} else {
					boring += 1;
					if (boring >= MAX_BORING)
						break;
				}
			}

			while (g.end > best_shift) {
				if (group_slide_up(xdf, &g, flags))
					xdl_bug("best shift unreached");
				if (group_previous(xdfo, &go))
					xdl_bug("group sync broken sliding to blank line");
			}
		}

 next:
		/* Move past the just-processed group: */
		if (group_next(xdf, &g))
			break;
		if (group_next(xdfo, &go))
			xdl_bug("group sync broken moving to next group");
	}

	if (!group_next(xdfo, &go))
		xdl_bug("group sync broken at end of file");

	return 0;
}


int xdl_build_script(xdfenv_t *xe, xdchange_t **xscr) {
	xdchange_t *cscr = NULL, *xch;
	char *rchg1 = xe->xdf1.rchg, *rchg2 = xe->xdf2.rchg;
	long i1, i2, l1, l2;

	/*
	 * Trivial. Collects "groups" of changes and creates an edit script.
	 */
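	/*
	 * Note: the backward scan below relies on rchg1[] and rchg2[] having a
	 * zero sentinel at index -1 (see the xdlgroup comment above), so no
	 * explicit bounds check is needed when stepping past the start.
	 */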
	for (i1 = xe->xdf1.nrec, i2 = xe->xdf2.nrec; i1 >= 0 || i2 >= 0; i1--, i2--)
		if (rchg1[i1 - 1] || rchg2[i2 - 1]) {
			for (l1 = i1; rchg1[i1 - 1]; i1--);
			for (l2 = i2; rchg2[i2 - 1]; i2--);

			if (!(xch = xdl_add_change(cscr, i1, i2, l1 - i1, l2 - i2))) {
				xdl_free_script(cscr);
				return -1;
			}
			cscr = xch;
		}

	*xscr = cscr;

	return 0;
}


void xdl_free_script(xdchange_t *xscr) {
	xdchange_t *xch;

	while ((xch = xscr) != NULL) {
		xscr = xscr->next;
		xdl_free(xch);
	}
}
1009
1009
1010
1010
1011 /*
1011 /*
1012 * Starting at the passed change atom, find the latest change atom to be included
1012 * Starting at the passed change atom, find the latest change atom to be included
1013 * inside the differential hunk according to the specified configuration.
1013 * inside the differential hunk according to the specified configuration.
1014 * Also advance xscr if the first changes must be discarded.
1014 * Also advance xscr if the first changes must be discarded.
1015 */
1015 */
1016 xdchange_t *xdl_get_hunk(xdchange_t **xscr, xdemitconf_t const *xecfg)
1016 xdchange_t *xdl_get_hunk(xdchange_t **xscr, xdemitconf_t const *xecfg)
1017 {
1017 {
1018 xdchange_t *xch, *xchp, *lxch;
1018 xdchange_t *xch, *xchp, *lxch;
1019 long max_common = 0;
1019 long max_common = 0;
1020 long max_ignorable = 0;
1020 long max_ignorable = 0;
1021 unsigned long ignored = 0; /* number of ignored blank lines */
1021 unsigned long ignored = 0; /* number of ignored blank lines */
1022
1022
1023 /* remove ignorable changes that are too far before other changes */
1023 /* remove ignorable changes that are too far before other changes */
1024 for (xchp = *xscr; xchp && xchp->ignore; xchp = xchp->next) {
1024 for (xchp = *xscr; xchp && xchp->ignore; xchp = xchp->next) {
1025 xch = xchp->next;
1025 xch = xchp->next;
1026
1026
1027 if (xch == NULL ||
1027 if (xch == NULL ||
1028 xch->i1 - (xchp->i1 + xchp->chg1) >= max_ignorable)
1028 xch->i1 - (xchp->i1 + xchp->chg1) >= max_ignorable)
1029 *xscr = xch;
1029 *xscr = xch;
1030 }
1030 }
1031
1031
1032 if (*xscr == NULL)
1032 if (*xscr == NULL)
1033 return NULL;
1033 return NULL;
1034
1034
1035 lxch = *xscr;
1035 lxch = *xscr;
1036
1036
1037 for (xchp = *xscr, xch = xchp->next; xch; xchp = xch, xch = xch->next) {
1037 for (xchp = *xscr, xch = xchp->next; xch; xchp = xch, xch = xch->next) {
1038 long distance = xch->i1 - (xchp->i1 + xchp->chg1);
1038 long distance = xch->i1 - (xchp->i1 + xchp->chg1);
1039 if (distance > max_common)
1039 if (distance > max_common)
1040 break;
1040 break;
1041
1041
1042 if (distance < max_ignorable && (!xch->ignore || lxch == xchp)) {
1042 if (distance < max_ignorable && (!xch->ignore || lxch == xchp)) {
1043 lxch = xch;
1043 lxch = xch;
1044 ignored = 0;
1044 ignored = 0;
1045 } else if (distance < max_ignorable && xch->ignore) {
1045 } else if (distance < max_ignorable && xch->ignore) {
1046 ignored += xch->chg2;
1046 ignored += xch->chg2;
1047 } else if (lxch != xchp &&
1047 } else if (lxch != xchp &&
1048 xch->i1 + ignored - (lxch->i1 + lxch->chg1) > max_common) {
1048 xch->i1 + ignored - (lxch->i1 + lxch->chg1) > max_common) {
1049 break;
1049 break;
1050 } else if (!xch->ignore) {
1050 } else if (!xch->ignore) {
1051 lxch = xch;
1051 lxch = xch;
1052 ignored = 0;
1052 ignored = 0;
1053 } else {
1053 } else {
1054 ignored += xch->chg2;
1054 ignored += xch->chg2;
1055 }
1055 }
1056 }
1056 }
1057
1057
1058 return lxch;
1058 return lxch;
1059 }
1059 }
1060
1060
1061
1061
1062 static int xdl_call_hunk_func(xdfenv_t *xe, xdchange_t *xscr, xdemitcb_t *ecb,
1062 static int xdl_call_hunk_func(xdfenv_t *xe, xdchange_t *xscr, xdemitcb_t *ecb,
1063 xdemitconf_t const *xecfg)
1063 xdemitconf_t const *xecfg)
1064 {
1064 {
1065 long p = xe->nprefix, s = xe->nsuffix;
1065 xdchange_t *xch, *xche;
1066 xdchange_t *xch, *xche;
1066
1067
1067 if (!xecfg->hunk_func)
1068 if (!xecfg->hunk_func)
1068 return -1;
1069 return -1;
1069
1070
1070 if ((xecfg->flags & XDL_EMIT_BDIFFHUNK) != 0) {
1071 if ((xecfg->flags & XDL_EMIT_BDIFFHUNK) != 0) {
1071 long i1 = 0, i2 = 0, n1 = xe->xdf1.nrec, n2 = xe->xdf2.nrec;
1072 long i1 = 0, i2 = 0, n1 = xe->xdf1.nrec, n2 = xe->xdf2.nrec;
1072 for (xch = xscr; xch; xch = xche->next) {
1073 for (xch = xscr; xch; xch = xche->next) {
1073 xche = xdl_get_hunk(&xch, xecfg);
1074 xche = xdl_get_hunk(&xch, xecfg);
1074 if (!xch)
1075 if (!xch)
1075 break;
1076 break;
1077 if (xch != xche)
1078 xdl_bug("xch != xche");
1079 xch->i1 += p;
1080 xch->i2 += p;
1076 if (xch->i1 > i1 || xch->i2 > i2) {
1081 if (xch->i1 > i1 || xch->i2 > i2) {
1077 if (xecfg->hunk_func(i1, xch->i1, i2, xch->i2, ecb->priv) < 0)
1082 if (xecfg->hunk_func(i1, xch->i1, i2, xch->i2, ecb->priv) < 0)
1078 return -1;
1083 return -1;
1079 }
1084 }
1080 i1 = xche->i1 + xche->chg1;
1085 i1 = xche->i1 + xche->chg1;
1081 i2 = xche->i2 + xche->chg2;
1086 i2 = xche->i2 + xche->chg2;
1082 }
1087 }
1083 if (xecfg->hunk_func(i1, n1, i2, n2, ecb->priv) < 0)
1088 if (xecfg->hunk_func(i1, n1 + p + s, i2, n2 + p + s,
1089 ecb->priv) < 0)
1084 return -1;
1090 return -1;
1085 } else {
1091 } else {
1086 for (xch = xscr; xch; xch = xche->next) {
1092 for (xch = xscr; xch; xch = xche->next) {
1087 xche = xdl_get_hunk(&xch, xecfg);
1093 xche = xdl_get_hunk(&xch, xecfg);
1088 if (!xch)
1094 if (!xch)
1089 break;
1095 break;
1090 if (xecfg->hunk_func(
1096 if (xecfg->hunk_func(xch->i1 + p,
1091 xch->i1, xche->i1 + xche->chg1 - xch->i1,
1097 xche->i1 + xche->chg1 - xch->i1,
1092 xch->i2, xche->i2 + xche->chg2 - xch->i2,
1098 xch->i2 + p,
1099 xche->i2 + xche->chg2 - xch->i2,
1093 ecb->priv) < 0)
1100 ecb->priv) < 0)
1094 return -1;
1101 return -1;
1095 }
1102 }
1096 }
1103 }
1097 return 0;
1104 return 0;
1098 }
1105 }
1099
1106
1100 int xdl_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
1107 int xdl_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
1101 xdemitconf_t const *xecfg, xdemitcb_t *ecb) {
1108 xdemitconf_t const *xecfg, xdemitcb_t *ecb) {
1102 xdchange_t *xscr;
1109 xdchange_t *xscr;
1103 xdfenv_t xe;
1110 xdfenv_t xe;
1104
1111
1105 if (xdl_do_diff(mf1, mf2, xpp, &xe) < 0) {
1112 if (xdl_do_diff(mf1, mf2, xpp, &xe) < 0) {
1106
1113
1107 return -1;
1114 return -1;
1108 }
1115 }
1109 if (xdl_change_compact(&xe.xdf1, &xe.xdf2, xpp->flags) < 0 ||
1116 if (xdl_change_compact(&xe.xdf1, &xe.xdf2, xpp->flags) < 0 ||
1110 xdl_change_compact(&xe.xdf2, &xe.xdf1, xpp->flags) < 0 ||
1117 xdl_change_compact(&xe.xdf2, &xe.xdf1, xpp->flags) < 0 ||
1111 xdl_build_script(&xe, &xscr) < 0) {
1118 xdl_build_script(&xe, &xscr) < 0) {
1112
1119
1113 xdl_free_env(&xe);
1120 xdl_free_env(&xe);
1114 return -1;
1121 return -1;
1115 }
1122 }
1116
1123
1117 if (xdl_call_hunk_func(&xe, xscr, ecb, xecfg) < 0) {
1124 if (xdl_call_hunk_func(&xe, xscr, ecb, xecfg) < 0) {
1118 xdl_free_script(xscr);
1125 xdl_free_script(xscr);
1119 xdl_free_env(&xe);
1126 xdl_free_env(&xe);
1120 return -1;
1127 return -1;
1121 }
1128 }
1122 xdl_free_script(xscr);
1129 xdl_free_script(xscr);
1123 xdl_free_env(&xe);
1130 xdl_free_env(&xe);
1124
1131
1125 return 0;
1132 return 0;
1126 }
1133 }
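Note on the hunk_func changes above: because the diff now runs on trimmed buffers, every emitted hunk start is shifted back by the number of trimmed prefix lines (p), and the whole-file hunk in the BDIFFHUNK case also grows by the trimmed suffix (s). A minimal sketch of that rebasing idea, using hypothetical names (struct hunk, rebase_hunk) rather than the xdiff types:

#include <stdio.h>

/* Hypothetical illustration, not the xdiff API: a hunk computed on the
 * trimmed buffers maps back to original-file coordinates by adding the
 * number of trimmed prefix lines to both starts; the lengths stay the
 * same. */
struct hunk { long i1, n1, i2, n2; };

static void rebase_hunk(struct hunk *h, long nprefix)
{
    h->i1 += nprefix;
    h->i2 += nprefix;
}

int main(void)
{
    struct hunk h = { 2, 1, 2, 1 };  /* hunk found on the trimmed buffers */
    rebase_hunk(&h, 200);            /* 200 common prefix lines were trimmed */
    printf("starts %ld/%ld, lengths %ld/%ld\n", h.i1, h.i2, h.n1, h.n2);
    return 0;
}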
@@ -1,466 +1,552 b''
1 /*
1 /*
2 * LibXDiff by Davide Libenzi ( File Differential Library )
2 * LibXDiff by Davide Libenzi ( File Differential Library )
3 * Copyright (C) 2003 Davide Libenzi
3 * Copyright (C) 2003 Davide Libenzi
4 *
4 *
5 * This library is free software; you can redistribute it and/or
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
7 * License as published by the Free Software Foundation; either
8 * version 2.1 of the License, or (at your option) any later version.
8 * version 2.1 of the License, or (at your option) any later version.
9 *
9 *
10 * This library is distributed in the hope that it will be useful,
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
13 * Lesser General Public License for more details.
14 *
14 *
15 * You should have received a copy of the GNU Lesser General Public
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, see
16 * License along with this library; if not, see
17 * <http://www.gnu.org/licenses/>.
17 * <http://www.gnu.org/licenses/>.
18 *
18 *
19 * Davide Libenzi <davidel@xmailserver.org>
19 * Davide Libenzi <davidel@xmailserver.org>
20 *
20 *
21 */
21 */
22
22
23 #include "xinclude.h"
23 #include "xinclude.h"
24
24
25
25
26 #define XDL_KPDIS_RUN 4
26 #define XDL_KPDIS_RUN 4
27 #define XDL_MAX_EQLIMIT 1024
27 #define XDL_MAX_EQLIMIT 1024
28 #define XDL_SIMSCAN_WINDOW 100
28 #define XDL_SIMSCAN_WINDOW 100
29 #define XDL_GUESS_NLINES1 256
29 #define XDL_GUESS_NLINES1 256
30
30
31
31
32 typedef struct s_xdlclass {
32 typedef struct s_xdlclass {
33 struct s_xdlclass *next;
33 struct s_xdlclass *next;
34 unsigned long ha;
34 unsigned long ha;
35 char const *line;
35 char const *line;
36 long size;
36 long size;
37 long idx;
37 long idx;
38 long len1, len2;
38 long len1, len2;
39 } xdlclass_t;
39 } xdlclass_t;
40
40
41 typedef struct s_xdlclassifier {
41 typedef struct s_xdlclassifier {
42 unsigned int hbits;
42 unsigned int hbits;
43 long hsize;
43 long hsize;
44 xdlclass_t **rchash;
44 xdlclass_t **rchash;
45 chastore_t ncha;
45 chastore_t ncha;
46 xdlclass_t **rcrecs;
46 xdlclass_t **rcrecs;
47 long alloc;
47 long alloc;
48 long count;
48 long count;
49 long flags;
49 long flags;
50 } xdlclassifier_t;
50 } xdlclassifier_t;
51
51
52
52
53
53
54
54
55 static int xdl_init_classifier(xdlclassifier_t *cf, long size, long flags);
55 static int xdl_init_classifier(xdlclassifier_t *cf, long size, long flags);
56 static void xdl_free_classifier(xdlclassifier_t *cf);
56 static void xdl_free_classifier(xdlclassifier_t *cf);
57 static int xdl_classify_record(unsigned int pass, xdlclassifier_t *cf, xrecord_t **rhash,
57 static int xdl_classify_record(unsigned int pass, xdlclassifier_t *cf, xrecord_t **rhash,
58 unsigned int hbits, xrecord_t *rec);
58 unsigned int hbits, xrecord_t *rec);
59 static int xdl_prepare_ctx(unsigned int pass, mmfile_t *mf, long narec, xpparam_t const *xpp,
59 static int xdl_prepare_ctx(unsigned int pass, mmfile_t *mf, long narec, xpparam_t const *xpp,
60 xdlclassifier_t *cf, xdfile_t *xdf);
60 xdlclassifier_t *cf, xdfile_t *xdf);
61 static void xdl_free_ctx(xdfile_t *xdf);
61 static void xdl_free_ctx(xdfile_t *xdf);
62 static int xdl_clean_mmatch(char const *dis, long i, long s, long e);
62 static int xdl_clean_mmatch(char const *dis, long i, long s, long e);
63 static int xdl_cleanup_records(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xdf2);
63 static int xdl_cleanup_records(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xdf2);
64 static int xdl_trim_ends(xdfile_t *xdf1, xdfile_t *xdf2);
64 static int xdl_trim_ends(xdfile_t *xdf1, xdfile_t *xdf2);
65 static int xdl_optimize_ctxs(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xdf2);
65 static int xdl_optimize_ctxs(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xdf2);
66
66
67
67
68
68
69
69
70 static int xdl_init_classifier(xdlclassifier_t *cf, long size, long flags) {
70 static int xdl_init_classifier(xdlclassifier_t *cf, long size, long flags) {
71 cf->flags = flags;
71 cf->flags = flags;
72
72
73 cf->hbits = xdl_hashbits((unsigned int) size);
73 cf->hbits = xdl_hashbits((unsigned int) size);
74 cf->hsize = 1 << cf->hbits;
74 cf->hsize = 1 << cf->hbits;
75
75
76 if (xdl_cha_init(&cf->ncha, sizeof(xdlclass_t), size / 4 + 1) < 0) {
76 if (xdl_cha_init(&cf->ncha, sizeof(xdlclass_t), size / 4 + 1) < 0) {
77
77
78 return -1;
78 return -1;
79 }
79 }
80 if (!(cf->rchash = (xdlclass_t **) xdl_malloc(cf->hsize * sizeof(xdlclass_t *)))) {
80 if (!(cf->rchash = (xdlclass_t **) xdl_malloc(cf->hsize * sizeof(xdlclass_t *)))) {
81
81
82 xdl_cha_free(&cf->ncha);
82 xdl_cha_free(&cf->ncha);
83 return -1;
83 return -1;
84 }
84 }
85 memset(cf->rchash, 0, cf->hsize * sizeof(xdlclass_t *));
85 memset(cf->rchash, 0, cf->hsize * sizeof(xdlclass_t *));
86
86
87 cf->alloc = size;
87 cf->alloc = size;
88 if (!(cf->rcrecs = (xdlclass_t **) xdl_malloc(cf->alloc * sizeof(xdlclass_t *)))) {
88 if (!(cf->rcrecs = (xdlclass_t **) xdl_malloc(cf->alloc * sizeof(xdlclass_t *)))) {
89
89
90 xdl_free(cf->rchash);
90 xdl_free(cf->rchash);
91 xdl_cha_free(&cf->ncha);
91 xdl_cha_free(&cf->ncha);
92 return -1;
92 return -1;
93 }
93 }
94
94
95 cf->count = 0;
95 cf->count = 0;
96
96
97 return 0;
97 return 0;
98 }
98 }
99
99
100
100
101 static void xdl_free_classifier(xdlclassifier_t *cf) {
101 static void xdl_free_classifier(xdlclassifier_t *cf) {
102
102
103 xdl_free(cf->rcrecs);
103 xdl_free(cf->rcrecs);
104 xdl_free(cf->rchash);
104 xdl_free(cf->rchash);
105 xdl_cha_free(&cf->ncha);
105 xdl_cha_free(&cf->ncha);
106 }
106 }
107
107
108
108
109 static int xdl_classify_record(unsigned int pass, xdlclassifier_t *cf, xrecord_t **rhash,
109 static int xdl_classify_record(unsigned int pass, xdlclassifier_t *cf, xrecord_t **rhash,
110 unsigned int hbits, xrecord_t *rec) {
110 unsigned int hbits, xrecord_t *rec) {
111 long hi;
111 long hi;
112 char const *line;
112 char const *line;
113 xdlclass_t *rcrec;
113 xdlclass_t *rcrec;
114 xdlclass_t **rcrecs;
114 xdlclass_t **rcrecs;
115
115
116 line = rec->ptr;
116 line = rec->ptr;
117 hi = (long) XDL_HASHLONG(rec->ha, cf->hbits);
117 hi = (long) XDL_HASHLONG(rec->ha, cf->hbits);
118 for (rcrec = cf->rchash[hi]; rcrec; rcrec = rcrec->next)
118 for (rcrec = cf->rchash[hi]; rcrec; rcrec = rcrec->next)
119 if (rcrec->ha == rec->ha &&
119 if (rcrec->ha == rec->ha &&
120 xdl_recmatch(rcrec->line, rcrec->size,
120 xdl_recmatch(rcrec->line, rcrec->size,
121 rec->ptr, rec->size, cf->flags))
121 rec->ptr, rec->size, cf->flags))
122 break;
122 break;
123
123
124 if (!rcrec) {
124 if (!rcrec) {
125 if (!(rcrec = xdl_cha_alloc(&cf->ncha))) {
125 if (!(rcrec = xdl_cha_alloc(&cf->ncha))) {
126
126
127 return -1;
127 return -1;
128 }
128 }
129 rcrec->idx = cf->count++;
129 rcrec->idx = cf->count++;
130 if (cf->count > cf->alloc) {
130 if (cf->count > cf->alloc) {
131 cf->alloc *= 2;
131 cf->alloc *= 2;
132 if (!(rcrecs = (xdlclass_t **) xdl_realloc(cf->rcrecs, cf->alloc * sizeof(xdlclass_t *)))) {
132 if (!(rcrecs = (xdlclass_t **) xdl_realloc(cf->rcrecs, cf->alloc * sizeof(xdlclass_t *)))) {
133
133
134 return -1;
134 return -1;
135 }
135 }
136 cf->rcrecs = rcrecs;
136 cf->rcrecs = rcrecs;
137 }
137 }
138 cf->rcrecs[rcrec->idx] = rcrec;
138 cf->rcrecs[rcrec->idx] = rcrec;
139 rcrec->line = line;
139 rcrec->line = line;
140 rcrec->size = rec->size;
140 rcrec->size = rec->size;
141 rcrec->ha = rec->ha;
141 rcrec->ha = rec->ha;
142 rcrec->len1 = rcrec->len2 = 0;
142 rcrec->len1 = rcrec->len2 = 0;
143 rcrec->next = cf->rchash[hi];
143 rcrec->next = cf->rchash[hi];
144 cf->rchash[hi] = rcrec;
144 cf->rchash[hi] = rcrec;
145 }
145 }
146
146
147 (pass == 1) ? rcrec->len1++ : rcrec->len2++;
147 (pass == 1) ? rcrec->len1++ : rcrec->len2++;
148
148
149 rec->ha = (unsigned long) rcrec->idx;
149 rec->ha = (unsigned long) rcrec->idx;
150
150
151 hi = (long) XDL_HASHLONG(rec->ha, hbits);
151 hi = (long) XDL_HASHLONG(rec->ha, hbits);
152 rec->next = rhash[hi];
152 rec->next = rhash[hi];
153 rhash[hi] = rec;
153 rhash[hi] = rec;
154
154
155 return 0;
155 return 0;
156 }
156 }
157
157
158
158
159 /*
160 * Trim common prefix from files.
161 *
162 * Note: trimming could affect hunk shifting. But the performance benefit
163 * outweighs the shift change. A diff result with suboptimal shifting is still
164 * valid.
165 */
166 static void xdl_trim_files(mmfile_t *mf1, mmfile_t *mf2, long reserved,
167 xdfenv_t *xe, mmfile_t *out_mf1, mmfile_t *out_mf2) {
168 mmfile_t msmall, mlarge;
169 /* prefix lines, prefix bytes, suffix lines, suffix bytes */
170 long plines = 0, pbytes = 0, slines = 0, sbytes = 0, i;
171 /* prefix char pointer for msmall and mlarge */
172 const char *pp1, *pp2;
173 /* suffix char pointer for msmall and mlarge */
174 const char *ps1, *ps2;
175
176 /* reserved must be >= 0 for the line boundary adjustment to work */
177 if (reserved < 0)
178 reserved = 0;
179
180 if (mf1->size < mf2->size) {
181 memcpy(&msmall, mf1, sizeof(mmfile_t));
182 memcpy(&mlarge, mf2, sizeof(mmfile_t));
183 } else {
184 memcpy(&msmall, mf2, sizeof(mmfile_t));
185 memcpy(&mlarge, mf1, sizeof(mmfile_t));
186 }
187
188 pp1 = msmall.ptr, pp2 = mlarge.ptr;
189 for (i = 0; i < msmall.size && *pp1 == *pp2; ++i) {
190 plines += (*pp1 == '\n');
191 pp1++, pp2++;
192 }
193
194 ps1 = msmall.ptr + msmall.size - 1, ps2 = mlarge.ptr + mlarge.size - 1;
195 while (ps1 > pp1 && *ps1 == *ps2) {
196 slines += (*ps1 == '\n');
197 ps1--, ps2--;
198 }
199
200 /* Retract common prefix and suffix boundaries for reserved lines */
201 if (plines <= reserved + 1) {
202 plines = 0;
203 } else {
204 i = 0;
205 while (i <= reserved) {
206 pp1--;
207 i += (*pp1 == '\n');
208 }
209 /* The new mmfile starts at the next char just after '\n' */
210 pbytes = pp1 - msmall.ptr + 1;
211 plines -= reserved;
212 }
213
214 if (slines <= reserved + 1) {
215 slines = 0;
216 } else {
217 /* Note: with compiler SIMD support (e.g. -O3 -mavx2), this
218 * might perform better than memchr. */
219 i = 0;
220 while (i <= reserved) {
221 ps1++;
222 i += (*ps1 == '\n');
223 }
224 /* The new mmfile includes this '\n' */
225 sbytes = msmall.ptr + msmall.size - ps1 - 1;
226 slines -= reserved;
227 if (msmall.ptr[msmall.size - 1] == '\n')
228 slines -= 1;
229 }
230
231 xe->nprefix = plines;
232 xe->nsuffix = slines;
233 out_mf1->ptr = mf1->ptr + pbytes;
234 out_mf1->size = mf1->size - pbytes - sbytes;
235 out_mf2->ptr = mf2->ptr + pbytes;
236 out_mf2->size = mf2->size - pbytes - sbytes;
237 }
238
239
159 static int xdl_prepare_ctx(unsigned int pass, mmfile_t *mf, long narec, xpparam_t const *xpp,
240 static int xdl_prepare_ctx(unsigned int pass, mmfile_t *mf, long narec, xpparam_t const *xpp,
160 xdlclassifier_t *cf, xdfile_t *xdf) {
241 xdlclassifier_t *cf, xdfile_t *xdf) {
161 unsigned int hbits;
242 unsigned int hbits;
162 long nrec, hsize, bsize;
243 long nrec, hsize, bsize;
163 unsigned long hav;
244 unsigned long hav;
164 char const *blk, *cur, *top, *prev;
245 char const *blk, *cur, *top, *prev;
165 xrecord_t *crec;
246 xrecord_t *crec;
166 xrecord_t **recs, **rrecs;
247 xrecord_t **recs, **rrecs;
167 xrecord_t **rhash;
248 xrecord_t **rhash;
168 unsigned long *ha;
249 unsigned long *ha;
169 char *rchg;
250 char *rchg;
170 long *rindex;
251 long *rindex;
171
252
172 ha = NULL;
253 ha = NULL;
173 rindex = NULL;
254 rindex = NULL;
174 rchg = NULL;
255 rchg = NULL;
175 rhash = NULL;
256 rhash = NULL;
176 recs = NULL;
257 recs = NULL;
177
258
178 if (xdl_cha_init(&xdf->rcha, sizeof(xrecord_t), narec / 4 + 1) < 0)
259 if (xdl_cha_init(&xdf->rcha, sizeof(xrecord_t), narec / 4 + 1) < 0)
179 goto abort;
260 goto abort;
180 if (!(recs = (xrecord_t **) xdl_malloc(narec * sizeof(xrecord_t *))))
261 if (!(recs = (xrecord_t **) xdl_malloc(narec * sizeof(xrecord_t *))))
181 goto abort;
262 goto abort;
182
263
183 {
264 {
184 hbits = xdl_hashbits((unsigned int) narec);
265 hbits = xdl_hashbits((unsigned int) narec);
185 hsize = 1 << hbits;
266 hsize = 1 << hbits;
186 if (!(rhash = (xrecord_t **) xdl_malloc(hsize * sizeof(xrecord_t *))))
267 if (!(rhash = (xrecord_t **) xdl_malloc(hsize * sizeof(xrecord_t *))))
187 goto abort;
268 goto abort;
188 memset(rhash, 0, hsize * sizeof(xrecord_t *));
269 memset(rhash, 0, hsize * sizeof(xrecord_t *));
189 }
270 }
190
271
191 nrec = 0;
272 nrec = 0;
192 if ((cur = blk = xdl_mmfile_first(mf, &bsize)) != NULL) {
273 if ((cur = blk = xdl_mmfile_first(mf, &bsize)) != NULL) {
193 for (top = blk + bsize; cur < top; ) {
274 for (top = blk + bsize; cur < top; ) {
194 prev = cur;
275 prev = cur;
195 hav = xdl_hash_record(&cur, top, xpp->flags);
276 hav = xdl_hash_record(&cur, top, xpp->flags);
196 if (nrec >= narec) {
277 if (nrec >= narec) {
197 narec *= 2;
278 narec *= 2;
198 if (!(rrecs = (xrecord_t **) xdl_realloc(recs, narec * sizeof(xrecord_t *))))
279 if (!(rrecs = (xrecord_t **) xdl_realloc(recs, narec * sizeof(xrecord_t *))))
199 goto abort;
280 goto abort;
200 recs = rrecs;
281 recs = rrecs;
201 }
282 }
202 if (!(crec = xdl_cha_alloc(&xdf->rcha)))
283 if (!(crec = xdl_cha_alloc(&xdf->rcha)))
203 goto abort;
284 goto abort;
204 crec->ptr = prev;
285 crec->ptr = prev;
205 crec->size = (long) (cur - prev);
286 crec->size = (long) (cur - prev);
206 crec->ha = hav;
287 crec->ha = hav;
207 recs[nrec++] = crec;
288 recs[nrec++] = crec;
208
289
209 if (xdl_classify_record(pass, cf, rhash, hbits, crec) < 0)
290 if (xdl_classify_record(pass, cf, rhash, hbits, crec) < 0)
210 goto abort;
291 goto abort;
211 }
292 }
212 }
293 }
213
294
214 if (!(rchg = (char *) xdl_malloc((nrec + 2) * sizeof(char))))
295 if (!(rchg = (char *) xdl_malloc((nrec + 2) * sizeof(char))))
215 goto abort;
296 goto abort;
216 memset(rchg, 0, (nrec + 2) * sizeof(char));
297 memset(rchg, 0, (nrec + 2) * sizeof(char));
217
298
218 if (!(rindex = (long *) xdl_malloc((nrec + 1) * sizeof(long))))
299 if (!(rindex = (long *) xdl_malloc((nrec + 1) * sizeof(long))))
219 goto abort;
300 goto abort;
220 if (!(ha = (unsigned long *) xdl_malloc((nrec + 1) * sizeof(unsigned long))))
301 if (!(ha = (unsigned long *) xdl_malloc((nrec + 1) * sizeof(unsigned long))))
221 goto abort;
302 goto abort;
222
303
223 xdf->nrec = nrec;
304 xdf->nrec = nrec;
224 xdf->recs = recs;
305 xdf->recs = recs;
225 xdf->hbits = hbits;
306 xdf->hbits = hbits;
226 xdf->rhash = rhash;
307 xdf->rhash = rhash;
227 xdf->rchg = rchg + 1;
308 xdf->rchg = rchg + 1;
228 xdf->rindex = rindex;
309 xdf->rindex = rindex;
229 xdf->nreff = 0;
310 xdf->nreff = 0;
230 xdf->ha = ha;
311 xdf->ha = ha;
231 xdf->dstart = 0;
312 xdf->dstart = 0;
232 xdf->dend = nrec - 1;
313 xdf->dend = nrec - 1;
233
314
234 return 0;
315 return 0;
235
316
236 abort:
317 abort:
237 xdl_free(ha);
318 xdl_free(ha);
238 xdl_free(rindex);
319 xdl_free(rindex);
239 xdl_free(rchg);
320 xdl_free(rchg);
240 xdl_free(rhash);
321 xdl_free(rhash);
241 xdl_free(recs);
322 xdl_free(recs);
242 xdl_cha_free(&xdf->rcha);
323 xdl_cha_free(&xdf->rcha);
243 return -1;
324 return -1;
244 }
325 }
245
326
246
327
247 static void xdl_free_ctx(xdfile_t *xdf) {
328 static void xdl_free_ctx(xdfile_t *xdf) {
248
329
249 xdl_free(xdf->rhash);
330 xdl_free(xdf->rhash);
250 xdl_free(xdf->rindex);
331 xdl_free(xdf->rindex);
251 xdl_free(xdf->rchg - 1);
332 xdl_free(xdf->rchg - 1);
252 xdl_free(xdf->ha);
333 xdl_free(xdf->ha);
253 xdl_free(xdf->recs);
334 xdl_free(xdf->recs);
254 xdl_cha_free(&xdf->rcha);
335 xdl_cha_free(&xdf->rcha);
255 }
336 }
256
337
338 /* Reserved lines for trimming, to leave room for shifting */
339 #define TRIM_RESERVED_LINES 100
257
340
258 int xdl_prepare_env(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
341 int xdl_prepare_env(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
259 xdfenv_t *xe) {
342 xdfenv_t *xe) {
260 long enl1, enl2, sample;
343 long enl1, enl2, sample;
344 mmfile_t tmf1, tmf2;
261 xdlclassifier_t cf;
345 xdlclassifier_t cf;
262
346
263 memset(&cf, 0, sizeof(cf));
347 memset(&cf, 0, sizeof(cf));
264
348
265 sample = XDL_GUESS_NLINES1;
349 sample = XDL_GUESS_NLINES1;
266
350
267 enl1 = xdl_guess_lines(mf1, sample) + 1;
351 enl1 = xdl_guess_lines(mf1, sample) + 1;
268 enl2 = xdl_guess_lines(mf2, sample) + 1;
352 enl2 = xdl_guess_lines(mf2, sample) + 1;
269
353
270 if (xdl_init_classifier(&cf, enl1 + enl2 + 1, xpp->flags) < 0)
354 if (xdl_init_classifier(&cf, enl1 + enl2 + 1, xpp->flags) < 0)
271 return -1;
355 return -1;
272
356
273 if (xdl_prepare_ctx(1, mf1, enl1, xpp, &cf, &xe->xdf1) < 0) {
357 xdl_trim_files(mf1, mf2, TRIM_RESERVED_LINES, xe, &tmf1, &tmf2);
358
359 if (xdl_prepare_ctx(1, &tmf1, enl1, xpp, &cf, &xe->xdf1) < 0) {
274
360
275 xdl_free_classifier(&cf);
361 xdl_free_classifier(&cf);
276 return -1;
362 return -1;
277 }
363 }
278 if (xdl_prepare_ctx(2, mf2, enl2, xpp, &cf, &xe->xdf2) < 0) {
364 if (xdl_prepare_ctx(2, &tmf2, enl2, xpp, &cf, &xe->xdf2) < 0) {
279
365
280 xdl_free_ctx(&xe->xdf1);
366 xdl_free_ctx(&xe->xdf1);
281 xdl_free_classifier(&cf);
367 xdl_free_classifier(&cf);
282 return -1;
368 return -1;
283 }
369 }
284
370
285 if (xdl_optimize_ctxs(&cf, &xe->xdf1, &xe->xdf2) < 0) {
371 if (xdl_optimize_ctxs(&cf, &xe->xdf1, &xe->xdf2) < 0) {
286 xdl_free_ctx(&xe->xdf2);
372 xdl_free_ctx(&xe->xdf2);
287 xdl_free_ctx(&xe->xdf1);
373 xdl_free_ctx(&xe->xdf1);
288 xdl_free_classifier(&cf);
374 xdl_free_classifier(&cf);
289 return -1;
375 return -1;
290 }
376 }
291
377
292 xdl_free_classifier(&cf);
378 xdl_free_classifier(&cf);
293
379
294 return 0;
380 return 0;
295 }
381 }
296
382
297
383
298 void xdl_free_env(xdfenv_t *xe) {
384 void xdl_free_env(xdfenv_t *xe) {
299
385
300 xdl_free_ctx(&xe->xdf2);
386 xdl_free_ctx(&xe->xdf2);
301 xdl_free_ctx(&xe->xdf1);
387 xdl_free_ctx(&xe->xdf1);
302 }
388 }
303
389
304
390
305 static int xdl_clean_mmatch(char const *dis, long i, long s, long e) {
391 static int xdl_clean_mmatch(char const *dis, long i, long s, long e) {
306 long r, rdis0, rpdis0, rdis1, rpdis1;
392 long r, rdis0, rpdis0, rdis1, rpdis1;
307
393
308 /*
394 /*
309 * Limits the window that is examined during the similar-lines
395 * Limits the window that is examined during the similar-lines
310 * scan. The loops below stop when dis[i - r] == 1 (a line that
396 * scan. The loops below stop when dis[i - r] == 1 (a line that
311 * has no match), but there are corner cases where the loop
397 * has no match), but there are corner cases where the loop
312 * proceeds all the way to the extremities, causing huge
398 * proceeds all the way to the extremities, causing huge
313 * performance penalties on big files.
399 * performance penalties on big files.
314 */
400 */
315 if (i - s > XDL_SIMSCAN_WINDOW)
401 if (i - s > XDL_SIMSCAN_WINDOW)
316 s = i - XDL_SIMSCAN_WINDOW;
402 s = i - XDL_SIMSCAN_WINDOW;
317 if (e - i > XDL_SIMSCAN_WINDOW)
403 if (e - i > XDL_SIMSCAN_WINDOW)
318 e = i + XDL_SIMSCAN_WINDOW;
404 e = i + XDL_SIMSCAN_WINDOW;
319
405
320 /*
406 /*
321 * Scans the lines before 'i' to find a run of lines that either
407 * Scans the lines before 'i' to find a run of lines that either
322 * have no match (dis[j] == 0) or have multiple matches (dis[j] > 1).
408 * have no match (dis[j] == 0) or have multiple matches (dis[j] > 1).
323 * Note that we always call this function with dis[i] > 1, so the
409 * Note that we always call this function with dis[i] > 1, so the
324 * current line (i) is already a multimatch line.
410 * current line (i) is already a multimatch line.
325 */
411 */
326 for (r = 1, rdis0 = 0, rpdis0 = 1; (i - r) >= s; r++) {
412 for (r = 1, rdis0 = 0, rpdis0 = 1; (i - r) >= s; r++) {
327 if (!dis[i - r])
413 if (!dis[i - r])
328 rdis0++;
414 rdis0++;
329 else if (dis[i - r] == 2)
415 else if (dis[i - r] == 2)
330 rpdis0++;
416 rpdis0++;
331 else
417 else
332 break;
418 break;
333 }
419 }
334 /*
420 /*
335 * If the run before line 'i' contains only multimatch lines, we
421 * If the run before line 'i' contains only multimatch lines, we
336 * return 0, so the current line (i) is not discarded.
422 * return 0, so the current line (i) is not discarded.
337 * We want to discard multimatch lines only when they appear in the
423 * We want to discard multimatch lines only when they appear in the
338 * middle of runs with nomatch lines (dis[j] == 0).
424 * middle of runs with nomatch lines (dis[j] == 0).
339 */
425 */
340 if (rdis0 == 0)
426 if (rdis0 == 0)
341 return 0;
427 return 0;
342 for (r = 1, rdis1 = 0, rpdis1 = 1; (i + r) <= e; r++) {
428 for (r = 1, rdis1 = 0, rpdis1 = 1; (i + r) <= e; r++) {
343 if (!dis[i + r])
429 if (!dis[i + r])
344 rdis1++;
430 rdis1++;
345 else if (dis[i + r] == 2)
431 else if (dis[i + r] == 2)
346 rpdis1++;
432 rpdis1++;
347 else
433 else
348 break;
434 break;
349 }
435 }
350 /*
436 /*
351 * If the run after line 'i' contains only multimatch lines, we
437 * If the run after line 'i' contains only multimatch lines, we
352 * return 0, so the current line (i) is not discarded.
438 * return 0, so the current line (i) is not discarded.
353 */
439 */
354 if (rdis1 == 0)
440 if (rdis1 == 0)
355 return 0;
441 return 0;
356 rdis1 += rdis0;
442 rdis1 += rdis0;
357 rpdis1 += rpdis0;
443 rpdis1 += rpdis0;
358
444
359 return rpdis1 * XDL_KPDIS_RUN < (rpdis1 + rdis1);
445 return rpdis1 * XDL_KPDIS_RUN < (rpdis1 + rdis1);
360 }
446 }
361
447
362
448
363 /*
449 /*
364 * Try to reduce the problem complexity by discarding records that have
450 * Try to reduce the problem complexity by discarding records that have
365 * no match in the other file. Also, lines that have multiple matches
451 * no match in the other file. Also, lines that have multiple matches
366 * may be discarded if they appear in a run of discardable lines.
452 * may be discarded if they appear in a run of discardable lines.
367 */
453 */
368 static int xdl_cleanup_records(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xdf2) {
454 static int xdl_cleanup_records(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xdf2) {
369 long i, nm, nreff, mlim;
455 long i, nm, nreff, mlim;
370 xrecord_t **recs;
456 xrecord_t **recs;
371 xdlclass_t *rcrec;
457 xdlclass_t *rcrec;
372 char *dis, *dis1, *dis2;
458 char *dis, *dis1, *dis2;
373
459
374 if (!(dis = (char *) xdl_malloc(xdf1->nrec + xdf2->nrec + 2))) {
460 if (!(dis = (char *) xdl_malloc(xdf1->nrec + xdf2->nrec + 2))) {
375
461
376 return -1;
462 return -1;
377 }
463 }
378 memset(dis, 0, xdf1->nrec + xdf2->nrec + 2);
464 memset(dis, 0, xdf1->nrec + xdf2->nrec + 2);
379 dis1 = dis;
465 dis1 = dis;
380 dis2 = dis1 + xdf1->nrec + 1;
466 dis2 = dis1 + xdf1->nrec + 1;
381
467
382 if ((mlim = xdl_bogosqrt(xdf1->nrec)) > XDL_MAX_EQLIMIT)
468 if ((mlim = xdl_bogosqrt(xdf1->nrec)) > XDL_MAX_EQLIMIT)
383 mlim = XDL_MAX_EQLIMIT;
469 mlim = XDL_MAX_EQLIMIT;
384 for (i = xdf1->dstart, recs = &xdf1->recs[xdf1->dstart]; i <= xdf1->dend; i++, recs++) {
470 for (i = xdf1->dstart, recs = &xdf1->recs[xdf1->dstart]; i <= xdf1->dend; i++, recs++) {
385 rcrec = cf->rcrecs[(*recs)->ha];
471 rcrec = cf->rcrecs[(*recs)->ha];
386 nm = rcrec ? rcrec->len2 : 0;
472 nm = rcrec ? rcrec->len2 : 0;
387 dis1[i] = (nm == 0) ? 0: (nm >= mlim) ? 2: 1;
473 dis1[i] = (nm == 0) ? 0: (nm >= mlim) ? 2: 1;
388 }
474 }
389
475
390 if ((mlim = xdl_bogosqrt(xdf2->nrec)) > XDL_MAX_EQLIMIT)
476 if ((mlim = xdl_bogosqrt(xdf2->nrec)) > XDL_MAX_EQLIMIT)
391 mlim = XDL_MAX_EQLIMIT;
477 mlim = XDL_MAX_EQLIMIT;
392 for (i = xdf2->dstart, recs = &xdf2->recs[xdf2->dstart]; i <= xdf2->dend; i++, recs++) {
478 for (i = xdf2->dstart, recs = &xdf2->recs[xdf2->dstart]; i <= xdf2->dend; i++, recs++) {
393 rcrec = cf->rcrecs[(*recs)->ha];
479 rcrec = cf->rcrecs[(*recs)->ha];
394 nm = rcrec ? rcrec->len1 : 0;
480 nm = rcrec ? rcrec->len1 : 0;
395 dis2[i] = (nm == 0) ? 0: (nm >= mlim) ? 2: 1;
481 dis2[i] = (nm == 0) ? 0: (nm >= mlim) ? 2: 1;
396 }
482 }
397
483
398 for (nreff = 0, i = xdf1->dstart, recs = &xdf1->recs[xdf1->dstart];
484 for (nreff = 0, i = xdf1->dstart, recs = &xdf1->recs[xdf1->dstart];
399 i <= xdf1->dend; i++, recs++) {
485 i <= xdf1->dend; i++, recs++) {
400 if (dis1[i] == 1 ||
486 if (dis1[i] == 1 ||
401 (dis1[i] == 2 && !xdl_clean_mmatch(dis1, i, xdf1->dstart, xdf1->dend))) {
487 (dis1[i] == 2 && !xdl_clean_mmatch(dis1, i, xdf1->dstart, xdf1->dend))) {
402 xdf1->rindex[nreff] = i;
488 xdf1->rindex[nreff] = i;
403 xdf1->ha[nreff] = (*recs)->ha;
489 xdf1->ha[nreff] = (*recs)->ha;
404 nreff++;
490 nreff++;
405 } else
491 } else
406 xdf1->rchg[i] = 1;
492 xdf1->rchg[i] = 1;
407 }
493 }
408 xdf1->nreff = nreff;
494 xdf1->nreff = nreff;
409
495
410 for (nreff = 0, i = xdf2->dstart, recs = &xdf2->recs[xdf2->dstart];
496 for (nreff = 0, i = xdf2->dstart, recs = &xdf2->recs[xdf2->dstart];
411 i <= xdf2->dend; i++, recs++) {
497 i <= xdf2->dend; i++, recs++) {
412 if (dis2[i] == 1 ||
498 if (dis2[i] == 1 ||
413 (dis2[i] == 2 && !xdl_clean_mmatch(dis2, i, xdf2->dstart, xdf2->dend))) {
499 (dis2[i] == 2 && !xdl_clean_mmatch(dis2, i, xdf2->dstart, xdf2->dend))) {
414 xdf2->rindex[nreff] = i;
500 xdf2->rindex[nreff] = i;
415 xdf2->ha[nreff] = (*recs)->ha;
501 xdf2->ha[nreff] = (*recs)->ha;
416 nreff++;
502 nreff++;
417 } else
503 } else
418 xdf2->rchg[i] = 1;
504 xdf2->rchg[i] = 1;
419 }
505 }
420 xdf2->nreff = nreff;
506 xdf2->nreff = nreff;
421
507
422 xdl_free(dis);
508 xdl_free(dis);
423
509
424 return 0;
510 return 0;
425 }
511 }
426
512
427
513
428 /*
514 /*
429 * Early trim initial and terminal matching records.
515 * Early trim initial and terminal matching records.
430 */
516 */
431 static int xdl_trim_ends(xdfile_t *xdf1, xdfile_t *xdf2) {
517 static int xdl_trim_ends(xdfile_t *xdf1, xdfile_t *xdf2) {
432 long i, lim;
518 long i, lim;
433 xrecord_t **recs1, **recs2;
519 xrecord_t **recs1, **recs2;
434
520
435 recs1 = xdf1->recs;
521 recs1 = xdf1->recs;
436 recs2 = xdf2->recs;
522 recs2 = xdf2->recs;
437 for (i = 0, lim = XDL_MIN(xdf1->nrec, xdf2->nrec); i < lim;
523 for (i = 0, lim = XDL_MIN(xdf1->nrec, xdf2->nrec); i < lim;
438 i++, recs1++, recs2++)
524 i++, recs1++, recs2++)
439 if ((*recs1)->ha != (*recs2)->ha)
525 if ((*recs1)->ha != (*recs2)->ha)
440 break;
526 break;
441
527
442 xdf1->dstart = xdf2->dstart = i;
528 xdf1->dstart = xdf2->dstart = i;
443
529
444 recs1 = xdf1->recs + xdf1->nrec - 1;
530 recs1 = xdf1->recs + xdf1->nrec - 1;
445 recs2 = xdf2->recs + xdf2->nrec - 1;
531 recs2 = xdf2->recs + xdf2->nrec - 1;
446 for (lim -= i, i = 0; i < lim; i++, recs1--, recs2--)
532 for (lim -= i, i = 0; i < lim; i++, recs1--, recs2--)
447 if ((*recs1)->ha != (*recs2)->ha)
533 if ((*recs1)->ha != (*recs2)->ha)
448 break;
534 break;
449
535
450 xdf1->dend = xdf1->nrec - i - 1;
536 xdf1->dend = xdf1->nrec - i - 1;
451 xdf2->dend = xdf2->nrec - i - 1;
537 xdf2->dend = xdf2->nrec - i - 1;
452
538
453 return 0;
539 return 0;
454 }
540 }
455
541
456
542
457 static int xdl_optimize_ctxs(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xdf2) {
543 static int xdl_optimize_ctxs(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xdf2) {
458
544
459 if (xdl_trim_ends(xdf1, xdf2) < 0 ||
545 if (xdl_trim_ends(xdf1, xdf2) < 0 ||
460 xdl_cleanup_records(cf, xdf1, xdf2) < 0) {
546 xdl_cleanup_records(cf, xdf1, xdf2) < 0) {
461
547
462 return -1;
548 return -1;
463 }
549 }
464
550
465 return 0;
551 return 0;
466 }
552 }
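Note on xdl_trim_files above: it compares the two buffers byte by byte from both ends and keeps TRIM_RESERVED_LINES of matching lines on each side so hunk shifting still has room to move. A simplified, line-oriented sketch of the same idea; the helper name trimmable_prefix_lines is an illustrative assumption, not part of xdiff:

#include <stdio.h>
#include <string.h>

/* Count how many whole common prefix lines could be trimmed, keeping
 * `reserved` of them as shift margin (simplified: compare bytes up to
 * the first mismatch, counting '\n'). */
static long trimmable_prefix_lines(const char *a, long alen,
                                   const char *b, long blen, long reserved)
{
    long i, lines = 0, limit = alen < blen ? alen : blen;

    for (i = 0; i < limit && a[i] == b[i]; i++)
        if (a[i] == '\n')
            lines++;
    return lines > reserved ? lines - reserved : 0;
}

int main(void)
{
    const char *f1 = "a\nb\nc\nX\nd\ne\n";
    const char *f2 = "a\nb\nc\nY\nd\ne\n";

    /* Three prefix lines match; with reserved = 1, only "a" and "b"
     * would be trimmed and "c" stays as context for shifting. */
    printf("trim %ld prefix line(s)\n",
           trimmable_prefix_lines(f1, (long) strlen(f1),
                                  f2, (long) strlen(f2), 1));
    return 0;
}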
@@ -1,67 +1,71 b''
1 /*
1 /*
2 * LibXDiff by Davide Libenzi ( File Differential Library )
2 * LibXDiff by Davide Libenzi ( File Differential Library )
3 * Copyright (C) 2003 Davide Libenzi
3 * Copyright (C) 2003 Davide Libenzi
4 *
4 *
5 * This library is free software; you can redistribute it and/or
5 * This library is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU Lesser General Public
6 * modify it under the terms of the GNU Lesser General Public
7 * License as published by the Free Software Foundation; either
7 * License as published by the Free Software Foundation; either
8 * version 2.1 of the License, or (at your option) any later version.
8 * version 2.1 of the License, or (at your option) any later version.
9 *
9 *
10 * This library is distributed in the hope that it will be useful,
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * Lesser General Public License for more details.
13 * Lesser General Public License for more details.
14 *
14 *
15 * You should have received a copy of the GNU Lesser General Public
15 * You should have received a copy of the GNU Lesser General Public
16 * License along with this library; if not, see
16 * License along with this library; if not, see
17 * <http://www.gnu.org/licenses/>.
17 * <http://www.gnu.org/licenses/>.
18 *
18 *
19 * Davide Libenzi <davidel@xmailserver.org>
19 * Davide Libenzi <davidel@xmailserver.org>
20 *
20 *
21 */
21 */
22
22
23 #if !defined(XTYPES_H)
23 #if !defined(XTYPES_H)
24 #define XTYPES_H
24 #define XTYPES_H
25
25
26
26
27
27
28 typedef struct s_chanode {
28 typedef struct s_chanode {
29 struct s_chanode *next;
29 struct s_chanode *next;
30 long icurr;
30 long icurr;
31 } chanode_t;
31 } chanode_t;
32
32
33 typedef struct s_chastore {
33 typedef struct s_chastore {
34 chanode_t *head, *tail;
34 chanode_t *head, *tail;
35 long isize, nsize;
35 long isize, nsize;
36 chanode_t *ancur;
36 chanode_t *ancur;
37 chanode_t *sncur;
37 chanode_t *sncur;
38 long scurr;
38 long scurr;
39 } chastore_t;
39 } chastore_t;
40
40
41 typedef struct s_xrecord {
41 typedef struct s_xrecord {
42 struct s_xrecord *next;
42 struct s_xrecord *next;
43 char const *ptr;
43 char const *ptr;
44 long size;
44 long size;
45 unsigned long ha;
45 unsigned long ha;
46 } xrecord_t;
46 } xrecord_t;
47
47
48 typedef struct s_xdfile {
48 typedef struct s_xdfile {
49 chastore_t rcha;
49 chastore_t rcha;
50 long nrec;
50 long nrec;
51 unsigned int hbits;
51 unsigned int hbits;
52 xrecord_t **rhash;
52 xrecord_t **rhash;
53 long dstart, dend;
53 long dstart, dend;
54 xrecord_t **recs;
54 xrecord_t **recs;
55 char *rchg;
55 char *rchg;
56 long *rindex;
56 long *rindex;
57 long nreff;
57 long nreff;
58 unsigned long *ha;
58 unsigned long *ha;
59 } xdfile_t;
59 } xdfile_t;
60
60
61 typedef struct s_xdfenv {
61 typedef struct s_xdfenv {
62 xdfile_t xdf1, xdf2;
62 xdfile_t xdf1, xdf2;
63
64 /* number of lines for common prefix and suffix that are removed
65 * from xdf1 and xdf2 as a preprocessing step */
66 long nprefix, nsuffix;
63 } xdfenv_t;
67 } xdfenv_t;
64
68
65
69
66
70
67 #endif /* #if !defined(XTYPES_H) */
71 #endif /* #if !defined(XTYPES_H) */
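Note on the new nprefix/nsuffix fields: the trimmed record count plus the trimmed prefix and suffix lines should add back up to the original line count (assuming newline-terminated inputs), which is what the n1 + p + s adjustment in xdiffi.c relies on. A tiny hypothetical sanity check, with illustrative names only:

#include <assert.h>
#include <stdio.h>

/* Hypothetical invariant check: original line count equals trimmed lines
 * plus trimmed prefix and suffix (assumes newline-terminated files). */
static void check_trim(long orig_nrec, long trimmed_nrec,
                       long nprefix, long nsuffix)
{
    assert(orig_nrec == nprefix + trimmed_nrec + nsuffix);
}

int main(void)
{
    /* e.g. a 1000-line file with a 200-line prefix and 100-line suffix
     * trimmed leaves 700 lines for the actual diff */
    check_trim(1000, 700, 200, 100);
    printf("trim invariant holds\n");
    return 0;
}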