Show More
@@ -1,1090 +1,1094 | |||
|
1 | 1 | /* |
|
2 | 2 | * LibXDiff by Davide Libenzi ( File Differential Library ) |
|
3 | 3 | * Copyright (C) 2003 Davide Libenzi |
|
4 | 4 | * |
|
5 | 5 | * This library is free software; you can redistribute it and/or |
|
6 | 6 | * modify it under the terms of the GNU Lesser General Public |
|
7 | 7 | * License as published by the Free Software Foundation; either |
|
8 | 8 | * version 2.1 of the License, or (at your option) any later version. |
|
9 | 9 | * |
|
10 | 10 | * This library is distributed in the hope that it will be useful, |
|
11 | 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
12 | 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
|
13 | 13 | * Lesser General Public License for more details. |
|
14 | 14 | * |
|
15 | 15 | * You should have received a copy of the GNU Lesser General Public |
|
16 | 16 | * License along with this library; if not, see |
|
17 | 17 | * <http://www.gnu.org/licenses/>. |
|
18 | 18 | * |
|
19 | 19 | * Davide Libenzi <davidel@xmailserver.org> |
|
20 | 20 | * |
|
21 | 21 | */ |
|
22 | 22 | |
|
23 | 23 | #include "xinclude.h" |
|
24 | 24 | |
|
25 | 25 | |
|
26 | 26 | |
|
27 | 27 | #define XDL_MAX_COST_MIN 256 |
|
28 | 28 | #define XDL_HEUR_MIN_COST 256 |
|
29 | 29 | #define XDL_LINE_MAX (long)((1UL << (CHAR_BIT * sizeof(long) - 1)) - 1) |
|
30 | 30 | #define XDL_SNAKE_CNT 20 |
|
31 | 31 | #define XDL_K_HEUR 4 |
|
32 | 32 | |
|
33 | /* VC 2008 doesn't know about the inline keyword. */ | |
|
34 | #if defined(_MSC_VER) | |
|
35 | #define inline __forceinline | |
|
36 | #endif | |
|
33 | 37 | |
|
34 | 38 | |
|
35 | 39 | typedef struct s_xdpsplit { |
|
36 | 40 | long i1, i2; |
|
37 | 41 | int min_lo, min_hi; |
|
38 | 42 | } xdpsplit_t; |
|
39 | 43 | |
|
40 | 44 | |
|
41 | 45 | |
|
42 | 46 | |
|
43 | 47 | static long xdl_split(unsigned long const *ha1, long off1, long lim1, |
|
44 | 48 | unsigned long const *ha2, long off2, long lim2, |
|
45 | 49 | long *kvdf, long *kvdb, int need_min, xdpsplit_t *spl, |
|
46 | 50 | xdalgoenv_t *xenv); |
|
47 | 51 | static xdchange_t *xdl_add_change(xdchange_t *xscr, long i1, long i2, long chg1, long chg2); |
|
48 | 52 | |
|
49 | 53 | |
|
50 | 54 | |
|
51 | 55 | |
|
52 | 56 | |
|
53 | 57 | /* |
|
54 | 58 | * See "An O(ND) Difference Algorithm and its Variations", by Eugene Myers. |
|
55 | 59 | * Basically considers a "box" (off1, off2, lim1, lim2) and scan from both |
|
56 | 60 | * the forward diagonal starting from (off1, off2) and the backward diagonal |
|
57 | 61 | * starting from (lim1, lim2). If the K values on the same diagonal crosses |
|
58 | 62 | * returns the furthest point of reach. We might end up having to expensive |
|
59 | 63 | * cases using this algorithm is full, so a little bit of heuristic is needed |
|
60 | 64 | * to cut the search and to return a suboptimal point. |
|
61 | 65 | */ |
|
62 | 66 | static long xdl_split(unsigned long const *ha1, long off1, long lim1, |
|
63 | 67 | unsigned long const *ha2, long off2, long lim2, |
|
64 | 68 | long *kvdf, long *kvdb, int need_min, xdpsplit_t *spl, |
|
65 | 69 | xdalgoenv_t *xenv) { |
|
66 | 70 | long dmin = off1 - lim2, dmax = lim1 - off2; |
|
67 | 71 | long fmid = off1 - off2, bmid = lim1 - lim2; |
|
68 | 72 | long odd = (fmid - bmid) & 1; |
|
69 | 73 | long fmin = fmid, fmax = fmid; |
|
70 | 74 | long bmin = bmid, bmax = bmid; |
|
71 | 75 | long ec, d, i1, i2, prev1, best, dd, v, k; |
|
72 | 76 | |
|
73 | 77 | /* |
|
74 | 78 | * Set initial diagonal values for both forward and backward path. |
|
75 | 79 | */ |
|
76 | 80 | kvdf[fmid] = off1; |
|
77 | 81 | kvdb[bmid] = lim1; |
|
78 | 82 | |
|
79 | 83 | for (ec = 1;; ec++) { |
|
80 | 84 | int got_snake = 0; |
|
81 | 85 | |
|
82 | 86 | /* |
|
83 | 87 | * We need to extent the diagonal "domain" by one. If the next |
|
84 | 88 | * values exits the box boundaries we need to change it in the |
|
85 | 89 | * opposite direction because (max - min) must be a power of two. |
|
86 | 90 | * Also we initialize the external K value to -1 so that we can |
|
87 | 91 | * avoid extra conditions check inside the core loop. |
|
88 | 92 | */ |
|
89 | 93 | if (fmin > dmin) |
|
90 | 94 | kvdf[--fmin - 1] = -1; |
|
91 | 95 | else |
|
92 | 96 | ++fmin; |
|
93 | 97 | if (fmax < dmax) |
|
94 | 98 | kvdf[++fmax + 1] = -1; |
|
95 | 99 | else |
|
96 | 100 | --fmax; |
|
97 | 101 | |
|
98 | 102 | for (d = fmax; d >= fmin; d -= 2) { |
|
99 | 103 | if (kvdf[d - 1] >= kvdf[d + 1]) |
|
100 | 104 | i1 = kvdf[d - 1] + 1; |
|
101 | 105 | else |
|
102 | 106 | i1 = kvdf[d + 1]; |
|
103 | 107 | prev1 = i1; |
|
104 | 108 | i2 = i1 - d; |
|
105 | 109 | for (; i1 < lim1 && i2 < lim2 && ha1[i1] == ha2[i2]; i1++, i2++); |
|
106 | 110 | if (i1 - prev1 > xenv->snake_cnt) |
|
107 | 111 | got_snake = 1; |
|
108 | 112 | kvdf[d] = i1; |
|
109 | 113 | if (odd && bmin <= d && d <= bmax && kvdb[d] <= i1) { |
|
110 | 114 | spl->i1 = i1; |
|
111 | 115 | spl->i2 = i2; |
|
112 | 116 | spl->min_lo = spl->min_hi = 1; |
|
113 | 117 | return ec; |
|
114 | 118 | } |
|
115 | 119 | } |
|
116 | 120 | |
|
117 | 121 | /* |
|
118 | 122 | * We need to extent the diagonal "domain" by one. If the next |
|
119 | 123 | * values exits the box boundaries we need to change it in the |
|
120 | 124 | * opposite direction because (max - min) must be a power of two. |
|
121 | 125 | * Also we initialize the external K value to -1 so that we can |
|
122 | 126 | * avoid extra conditions check inside the core loop. |
|
123 | 127 | */ |
|
124 | 128 | if (bmin > dmin) |
|
125 | 129 | kvdb[--bmin - 1] = XDL_LINE_MAX; |
|
126 | 130 | else |
|
127 | 131 | ++bmin; |
|
128 | 132 | if (bmax < dmax) |
|
129 | 133 | kvdb[++bmax + 1] = XDL_LINE_MAX; |
|
130 | 134 | else |
|
131 | 135 | --bmax; |
|
132 | 136 | |
|
133 | 137 | for (d = bmax; d >= bmin; d -= 2) { |
|
134 | 138 | if (kvdb[d - 1] < kvdb[d + 1]) |
|
135 | 139 | i1 = kvdb[d - 1]; |
|
136 | 140 | else |
|
137 | 141 | i1 = kvdb[d + 1] - 1; |
|
138 | 142 | prev1 = i1; |
|
139 | 143 | i2 = i1 - d; |
|
140 | 144 | for (; i1 > off1 && i2 > off2 && ha1[i1 - 1] == ha2[i2 - 1]; i1--, i2--); |
|
141 | 145 | if (prev1 - i1 > xenv->snake_cnt) |
|
142 | 146 | got_snake = 1; |
|
143 | 147 | kvdb[d] = i1; |
|
144 | 148 | if (!odd && fmin <= d && d <= fmax && i1 <= kvdf[d]) { |
|
145 | 149 | spl->i1 = i1; |
|
146 | 150 | spl->i2 = i2; |
|
147 | 151 | spl->min_lo = spl->min_hi = 1; |
|
148 | 152 | return ec; |
|
149 | 153 | } |
|
150 | 154 | } |
|
151 | 155 | |
|
152 | 156 | if (need_min) |
|
153 | 157 | continue; |
|
154 | 158 | |
|
155 | 159 | /* |
|
156 | 160 | * If the edit cost is above the heuristic trigger and if |
|
157 | 161 | * we got a good snake, we sample current diagonals to see |
|
158 | 162 | * if some of the, have reached an "interesting" path. Our |
|
159 | 163 | * measure is a function of the distance from the diagonal |
|
160 | 164 | * corner (i1 + i2) penalized with the distance from the |
|
161 | 165 | * mid diagonal itself. If this value is above the current |
|
162 | 166 | * edit cost times a magic factor (XDL_K_HEUR) we consider |
|
163 | 167 | * it interesting. |
|
164 | 168 | */ |
|
165 | 169 | if (got_snake && ec > xenv->heur_min) { |
|
166 | 170 | for (best = 0, d = fmax; d >= fmin; d -= 2) { |
|
167 | 171 | dd = d > fmid ? d - fmid: fmid - d; |
|
168 | 172 | i1 = kvdf[d]; |
|
169 | 173 | i2 = i1 - d; |
|
170 | 174 | v = (i1 - off1) + (i2 - off2) - dd; |
|
171 | 175 | |
|
172 | 176 | if (v > XDL_K_HEUR * ec && v > best && |
|
173 | 177 | off1 + xenv->snake_cnt <= i1 && i1 < lim1 && |
|
174 | 178 | off2 + xenv->snake_cnt <= i2 && i2 < lim2) { |
|
175 | 179 | for (k = 1; ha1[i1 - k] == ha2[i2 - k]; k++) |
|
176 | 180 | if (k == xenv->snake_cnt) { |
|
177 | 181 | best = v; |
|
178 | 182 | spl->i1 = i1; |
|
179 | 183 | spl->i2 = i2; |
|
180 | 184 | break; |
|
181 | 185 | } |
|
182 | 186 | } |
|
183 | 187 | } |
|
184 | 188 | if (best > 0) { |
|
185 | 189 | spl->min_lo = 1; |
|
186 | 190 | spl->min_hi = 0; |
|
187 | 191 | return ec; |
|
188 | 192 | } |
|
189 | 193 | |
|
190 | 194 | for (best = 0, d = bmax; d >= bmin; d -= 2) { |
|
191 | 195 | dd = d > bmid ? d - bmid: bmid - d; |
|
192 | 196 | i1 = kvdb[d]; |
|
193 | 197 | i2 = i1 - d; |
|
194 | 198 | v = (lim1 - i1) + (lim2 - i2) - dd; |
|
195 | 199 | |
|
196 | 200 | if (v > XDL_K_HEUR * ec && v > best && |
|
197 | 201 | off1 < i1 && i1 <= lim1 - xenv->snake_cnt && |
|
198 | 202 | off2 < i2 && i2 <= lim2 - xenv->snake_cnt) { |
|
199 | 203 | for (k = 0; ha1[i1 + k] == ha2[i2 + k]; k++) |
|
200 | 204 | if (k == xenv->snake_cnt - 1) { |
|
201 | 205 | best = v; |
|
202 | 206 | spl->i1 = i1; |
|
203 | 207 | spl->i2 = i2; |
|
204 | 208 | break; |
|
205 | 209 | } |
|
206 | 210 | } |
|
207 | 211 | } |
|
208 | 212 | if (best > 0) { |
|
209 | 213 | spl->min_lo = 0; |
|
210 | 214 | spl->min_hi = 1; |
|
211 | 215 | return ec; |
|
212 | 216 | } |
|
213 | 217 | } |
|
214 | 218 | |
|
215 | 219 | /* |
|
216 | 220 | * Enough is enough. We spent too much time here and now we collect |
|
217 | 221 | * the furthest reaching path using the (i1 + i2) measure. |
|
218 | 222 | */ |
|
219 | 223 | if (ec >= xenv->mxcost) { |
|
220 | 224 | long fbest, fbest1, bbest, bbest1; |
|
221 | 225 | |
|
222 | 226 | fbest = fbest1 = -1; |
|
223 | 227 | for (d = fmax; d >= fmin; d -= 2) { |
|
224 | 228 | i1 = XDL_MIN(kvdf[d], lim1); |
|
225 | 229 | i2 = i1 - d; |
|
226 | 230 | if (lim2 < i2) |
|
227 | 231 | i1 = lim2 + d, i2 = lim2; |
|
228 | 232 | if (fbest < i1 + i2) { |
|
229 | 233 | fbest = i1 + i2; |
|
230 | 234 | fbest1 = i1; |
|
231 | 235 | } |
|
232 | 236 | } |
|
233 | 237 | |
|
234 | 238 | bbest = bbest1 = XDL_LINE_MAX; |
|
235 | 239 | for (d = bmax; d >= bmin; d -= 2) { |
|
236 | 240 | i1 = XDL_MAX(off1, kvdb[d]); |
|
237 | 241 | i2 = i1 - d; |
|
238 | 242 | if (i2 < off2) |
|
239 | 243 | i1 = off2 + d, i2 = off2; |
|
240 | 244 | if (i1 + i2 < bbest) { |
|
241 | 245 | bbest = i1 + i2; |
|
242 | 246 | bbest1 = i1; |
|
243 | 247 | } |
|
244 | 248 | } |
|
245 | 249 | |
|
246 | 250 | if ((lim1 + lim2) - bbest < fbest - (off1 + off2)) { |
|
247 | 251 | spl->i1 = fbest1; |
|
248 | 252 | spl->i2 = fbest - fbest1; |
|
249 | 253 | spl->min_lo = 1; |
|
250 | 254 | spl->min_hi = 0; |
|
251 | 255 | } else { |
|
252 | 256 | spl->i1 = bbest1; |
|
253 | 257 | spl->i2 = bbest - bbest1; |
|
254 | 258 | spl->min_lo = 0; |
|
255 | 259 | spl->min_hi = 1; |
|
256 | 260 | } |
|
257 | 261 | return ec; |
|
258 | 262 | } |
|
259 | 263 | } |
|
260 | 264 | } |
|
261 | 265 | |
|
262 | 266 | |
|
263 | 267 | /* |
|
264 | 268 | * Rule: "Divide et Impera". Recursively split the box in sub-boxes by calling |
|
265 | 269 | * the box splitting function. Note that the real job (marking changed lines) |
|
266 | 270 | * is done in the two boundary reaching checks. |
|
267 | 271 | */ |
|
268 | 272 | int xdl_recs_cmp(diffdata_t *dd1, long off1, long lim1, |
|
269 | 273 | diffdata_t *dd2, long off2, long lim2, |
|
270 | 274 | long *kvdf, long *kvdb, int need_min, xdalgoenv_t *xenv) { |
|
271 | 275 | unsigned long const *ha1 = dd1->ha, *ha2 = dd2->ha; |
|
272 | 276 | |
|
273 | 277 | /* |
|
274 | 278 | * Shrink the box by walking through each diagonal snake (SW and NE). |
|
275 | 279 | */ |
|
276 | 280 | for (; off1 < lim1 && off2 < lim2 && ha1[off1] == ha2[off2]; off1++, off2++); |
|
277 | 281 | for (; off1 < lim1 && off2 < lim2 && ha1[lim1 - 1] == ha2[lim2 - 1]; lim1--, lim2--); |
|
278 | 282 | |
|
279 | 283 | /* |
|
280 | 284 | * If one dimension is empty, then all records on the other one must |
|
281 | 285 | * be obviously changed. |
|
282 | 286 | */ |
|
283 | 287 | if (off1 == lim1) { |
|
284 | 288 | char *rchg2 = dd2->rchg; |
|
285 | 289 | long *rindex2 = dd2->rindex; |
|
286 | 290 | |
|
287 | 291 | for (; off2 < lim2; off2++) |
|
288 | 292 | rchg2[rindex2[off2]] = 1; |
|
289 | 293 | } else if (off2 == lim2) { |
|
290 | 294 | char *rchg1 = dd1->rchg; |
|
291 | 295 | long *rindex1 = dd1->rindex; |
|
292 | 296 | |
|
293 | 297 | for (; off1 < lim1; off1++) |
|
294 | 298 | rchg1[rindex1[off1]] = 1; |
|
295 | 299 | } else { |
|
296 | 300 | xdpsplit_t spl; |
|
297 | 301 | spl.i1 = spl.i2 = 0; |
|
298 | 302 | |
|
299 | 303 | /* |
|
300 | 304 | * Divide ... |
|
301 | 305 | */ |
|
302 | 306 | if (xdl_split(ha1, off1, lim1, ha2, off2, lim2, kvdf, kvdb, |
|
303 | 307 | need_min, &spl, xenv) < 0) { |
|
304 | 308 | |
|
305 | 309 | return -1; |
|
306 | 310 | } |
|
307 | 311 | |
|
308 | 312 | /* |
|
309 | 313 | * ... et Impera. |
|
310 | 314 | */ |
|
311 | 315 | if (xdl_recs_cmp(dd1, off1, spl.i1, dd2, off2, spl.i2, |
|
312 | 316 | kvdf, kvdb, spl.min_lo, xenv) < 0 || |
|
313 | 317 | xdl_recs_cmp(dd1, spl.i1, lim1, dd2, spl.i2, lim2, |
|
314 | 318 | kvdf, kvdb, spl.min_hi, xenv) < 0) { |
|
315 | 319 | |
|
316 | 320 | return -1; |
|
317 | 321 | } |
|
318 | 322 | } |
|
319 | 323 | |
|
320 | 324 | return 0; |
|
321 | 325 | } |
|
322 | 326 | |
|
323 | 327 | |
|
324 | 328 | int xdl_do_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp, |
|
325 | 329 | xdfenv_t *xe) { |
|
326 | 330 | long ndiags; |
|
327 | 331 | long *kvd, *kvdf, *kvdb; |
|
328 | 332 | xdalgoenv_t xenv; |
|
329 | 333 | diffdata_t dd1, dd2; |
|
330 | 334 | |
|
331 | 335 | if (xdl_prepare_env(mf1, mf2, xpp, xe) < 0) { |
|
332 | 336 | |
|
333 | 337 | return -1; |
|
334 | 338 | } |
|
335 | 339 | |
|
336 | 340 | /* |
|
337 | 341 | * Allocate and setup K vectors to be used by the differential algorithm. |
|
338 | 342 | * One is to store the forward path and one to store the backward path. |
|
339 | 343 | */ |
|
340 | 344 | ndiags = xe->xdf1.nreff + xe->xdf2.nreff + 3; |
|
341 | 345 | if (!(kvd = (long *) xdl_malloc((2 * ndiags + 2) * sizeof(long)))) { |
|
342 | 346 | |
|
343 | 347 | xdl_free_env(xe); |
|
344 | 348 | return -1; |
|
345 | 349 | } |
|
346 | 350 | kvdf = kvd; |
|
347 | 351 | kvdb = kvdf + ndiags; |
|
348 | 352 | kvdf += xe->xdf2.nreff + 1; |
|
349 | 353 | kvdb += xe->xdf2.nreff + 1; |
|
350 | 354 | |
|
351 | 355 | xenv.mxcost = xdl_bogosqrt(ndiags); |
|
352 | 356 | if (xenv.mxcost < XDL_MAX_COST_MIN) |
|
353 | 357 | xenv.mxcost = XDL_MAX_COST_MIN; |
|
354 | 358 | xenv.snake_cnt = XDL_SNAKE_CNT; |
|
355 | 359 | xenv.heur_min = XDL_HEUR_MIN_COST; |
|
356 | 360 | |
|
357 | 361 | dd1.nrec = xe->xdf1.nreff; |
|
358 | 362 | dd1.ha = xe->xdf1.ha; |
|
359 | 363 | dd1.rchg = xe->xdf1.rchg; |
|
360 | 364 | dd1.rindex = xe->xdf1.rindex; |
|
361 | 365 | dd2.nrec = xe->xdf2.nreff; |
|
362 | 366 | dd2.ha = xe->xdf2.ha; |
|
363 | 367 | dd2.rchg = xe->xdf2.rchg; |
|
364 | 368 | dd2.rindex = xe->xdf2.rindex; |
|
365 | 369 | |
|
366 | 370 | if (xdl_recs_cmp(&dd1, 0, dd1.nrec, &dd2, 0, dd2.nrec, |
|
367 | 371 | kvdf, kvdb, (xpp->flags & XDF_NEED_MINIMAL) != 0, &xenv) < 0) { |
|
368 | 372 | |
|
369 | 373 | xdl_free(kvd); |
|
370 | 374 | xdl_free_env(xe); |
|
371 | 375 | return -1; |
|
372 | 376 | } |
|
373 | 377 | |
|
374 | 378 | xdl_free(kvd); |
|
375 | 379 | |
|
376 | 380 | return 0; |
|
377 | 381 | } |
|
378 | 382 | |
|
379 | 383 | |
|
380 | 384 | static xdchange_t *xdl_add_change(xdchange_t *xscr, long i1, long i2, long chg1, long chg2) { |
|
381 | 385 | xdchange_t *xch; |
|
382 | 386 | |
|
383 | 387 | if (!(xch = (xdchange_t *) xdl_malloc(sizeof(xdchange_t)))) |
|
384 | 388 | return NULL; |
|
385 | 389 | |
|
386 | 390 | xch->next = xscr; |
|
387 | 391 | xch->i1 = i1; |
|
388 | 392 | xch->i2 = i2; |
|
389 | 393 | xch->chg1 = chg1; |
|
390 | 394 | xch->chg2 = chg2; |
|
391 | 395 | xch->ignore = 0; |
|
392 | 396 | |
|
393 | 397 | return xch; |
|
394 | 398 | } |
|
395 | 399 | |
|
396 | 400 | |
|
397 | 401 | static int recs_match(xrecord_t *rec1, xrecord_t *rec2, long flags) |
|
398 | 402 | { |
|
399 | 403 | return (rec1->ha == rec2->ha && |
|
400 | 404 | xdl_recmatch(rec1->ptr, rec1->size, |
|
401 | 405 | rec2->ptr, rec2->size, |
|
402 | 406 | flags)); |
|
403 | 407 | } |
|
404 | 408 | |
|
405 | 409 | /* |
|
406 | 410 | * If a line is indented more than this, get_indent() just returns this value. |
|
407 | 411 | * This avoids having to do absurd amounts of work for data that are not |
|
408 | 412 | * human-readable text, and also ensures that the output of get_indent fits within |
|
409 | 413 | * an int. |
|
410 | 414 | */ |
|
411 | 415 | #define MAX_INDENT 200 |
|
412 | 416 | |
|
413 | 417 | /* |
|
414 | 418 | * Return the amount of indentation of the specified line, treating TAB as 8 |
|
415 | 419 | * columns. Return -1 if line is empty or contains only whitespace. Clamp the |
|
416 | 420 | * output value at MAX_INDENT. |
|
417 | 421 | */ |
|
418 | 422 | static int get_indent(xrecord_t *rec) |
|
419 | 423 | { |
|
420 | 424 | long i; |
|
421 | 425 | int ret = 0; |
|
422 | 426 | |
|
423 | 427 | for (i = 0; i < rec->size; i++) { |
|
424 | 428 | char c = rec->ptr[i]; |
|
425 | 429 | |
|
426 | 430 | if (!XDL_ISSPACE(c)) |
|
427 | 431 | return ret; |
|
428 | 432 | else if (c == ' ') |
|
429 | 433 | ret += 1; |
|
430 | 434 | else if (c == '\t') |
|
431 | 435 | ret += 8 - ret % 8; |
|
432 | 436 | /* ignore other whitespace characters */ |
|
433 | 437 | |
|
434 | 438 | if (ret >= MAX_INDENT) |
|
435 | 439 | return MAX_INDENT; |
|
436 | 440 | } |
|
437 | 441 | |
|
438 | 442 | /* The line contains only whitespace. */ |
|
439 | 443 | return -1; |
|
440 | 444 | } |
|
441 | 445 | |
|
442 | 446 | /* |
|
443 | 447 | * If more than this number of consecutive blank rows are found, just return this |
|
444 | 448 | * value. This avoids requiring O(N^2) work for pathological cases, and also |
|
445 | 449 | * ensures that the output of score_split fits in an int. |
|
446 | 450 | */ |
|
447 | 451 | #define MAX_BLANKS 20 |
|
448 | 452 | |
|
449 | 453 | /* Characteristics measured about a hypothetical split position. */ |
|
450 | 454 | struct split_measurement { |
|
451 | 455 | /* |
|
452 | 456 | * Is the split at the end of the file (aside from any blank lines)? |
|
453 | 457 | */ |
|
454 | 458 | int end_of_file; |
|
455 | 459 | |
|
456 | 460 | /* |
|
457 | 461 | * How much is the line immediately following the split indented (or -1 if |
|
458 | 462 | * the line is blank): |
|
459 | 463 | */ |
|
460 | 464 | int indent; |
|
461 | 465 | |
|
462 | 466 | /* |
|
463 | 467 | * How many consecutive lines above the split are blank? |
|
464 | 468 | */ |
|
465 | 469 | int pre_blank; |
|
466 | 470 | |
|
467 | 471 | /* |
|
468 | 472 | * How much is the nearest non-blank line above the split indented (or -1 |
|
469 | 473 | * if there is no such line)? |
|
470 | 474 | */ |
|
471 | 475 | int pre_indent; |
|
472 | 476 | |
|
473 | 477 | /* |
|
474 | 478 | * How many lines after the line following the split are blank? |
|
475 | 479 | */ |
|
476 | 480 | int post_blank; |
|
477 | 481 | |
|
478 | 482 | /* |
|
479 | 483 | * How much is the nearest non-blank line after the line following the |
|
480 | 484 | * split indented (or -1 if there is no such line)? |
|
481 | 485 | */ |
|
482 | 486 | int post_indent; |
|
483 | 487 | }; |
|
484 | 488 | |
|
485 | 489 | struct split_score { |
|
486 | 490 | /* The effective indent of this split (smaller is preferred). */ |
|
487 | 491 | int effective_indent; |
|
488 | 492 | |
|
489 | 493 | /* Penalty for this split (smaller is preferred). */ |
|
490 | 494 | int penalty; |
|
491 | 495 | }; |
|
492 | 496 | |
|
493 | 497 | /* |
|
494 | 498 | * Fill m with information about a hypothetical split of xdf above line split. |
|
495 | 499 | */ |
|
496 | 500 | static void measure_split(const xdfile_t *xdf, long split, |
|
497 | 501 | struct split_measurement *m) |
|
498 | 502 | { |
|
499 | 503 | long i; |
|
500 | 504 | |
|
501 | 505 | if (split >= xdf->nrec) { |
|
502 | 506 | m->end_of_file = 1; |
|
503 | 507 | m->indent = -1; |
|
504 | 508 | } else { |
|
505 | 509 | m->end_of_file = 0; |
|
506 | 510 | m->indent = get_indent(xdf->recs[split]); |
|
507 | 511 | } |
|
508 | 512 | |
|
509 | 513 | m->pre_blank = 0; |
|
510 | 514 | m->pre_indent = -1; |
|
511 | 515 | for (i = split - 1; i >= 0; i--) { |
|
512 | 516 | m->pre_indent = get_indent(xdf->recs[i]); |
|
513 | 517 | if (m->pre_indent != -1) |
|
514 | 518 | break; |
|
515 | 519 | m->pre_blank += 1; |
|
516 | 520 | if (m->pre_blank == MAX_BLANKS) { |
|
517 | 521 | m->pre_indent = 0; |
|
518 | 522 | break; |
|
519 | 523 | } |
|
520 | 524 | } |
|
521 | 525 | |
|
522 | 526 | m->post_blank = 0; |
|
523 | 527 | m->post_indent = -1; |
|
524 | 528 | for (i = split + 1; i < xdf->nrec; i++) { |
|
525 | 529 | m->post_indent = get_indent(xdf->recs[i]); |
|
526 | 530 | if (m->post_indent != -1) |
|
527 | 531 | break; |
|
528 | 532 | m->post_blank += 1; |
|
529 | 533 | if (m->post_blank == MAX_BLANKS) { |
|
530 | 534 | m->post_indent = 0; |
|
531 | 535 | break; |
|
532 | 536 | } |
|
533 | 537 | } |
|
534 | 538 | } |
|
535 | 539 | |
|
536 | 540 | /* |
|
537 | 541 | * The empirically-determined weight factors used by score_split() below. |
|
538 | 542 | * Larger values means that the position is a less favorable place to split. |
|
539 | 543 | * |
|
540 | 544 | * Note that scores are only ever compared against each other, so multiplying |
|
541 | 545 | * all of these weight/penalty values by the same factor wouldn't change the |
|
542 | 546 | * heuristic's behavior. Still, we need to set that arbitrary scale *somehow*. |
|
543 | 547 | * In practice, these numbers are chosen to be large enough that they can be |
|
544 | 548 | * adjusted relative to each other with sufficient precision despite using |
|
545 | 549 | * integer math. |
|
546 | 550 | */ |
|
547 | 551 | |
|
548 | 552 | /* Penalty if there are no non-blank lines before the split */ |
|
549 | 553 | #define START_OF_FILE_PENALTY 1 |
|
550 | 554 | |
|
551 | 555 | /* Penalty if there are no non-blank lines after the split */ |
|
552 | 556 | #define END_OF_FILE_PENALTY 21 |
|
553 | 557 | |
|
554 | 558 | /* Multiplier for the number of blank lines around the split */ |
|
555 | 559 | #define TOTAL_BLANK_WEIGHT (-30) |
|
556 | 560 | |
|
557 | 561 | /* Multiplier for the number of blank lines after the split */ |
|
558 | 562 | #define POST_BLANK_WEIGHT 6 |
|
559 | 563 | |
|
560 | 564 | /* |
|
561 | 565 | * Penalties applied if the line is indented more than its predecessor |
|
562 | 566 | */ |
|
563 | 567 | #define RELATIVE_INDENT_PENALTY (-4) |
|
564 | 568 | #define RELATIVE_INDENT_WITH_BLANK_PENALTY 10 |
|
565 | 569 | |
|
566 | 570 | /* |
|
567 | 571 | * Penalties applied if the line is indented less than both its predecessor and |
|
568 | 572 | * its successor |
|
569 | 573 | */ |
|
570 | 574 | #define RELATIVE_OUTDENT_PENALTY 24 |
|
571 | 575 | #define RELATIVE_OUTDENT_WITH_BLANK_PENALTY 17 |
|
572 | 576 | |
|
573 | 577 | /* |
|
574 | 578 | * Penalties applied if the line is indented less than its predecessor but not |
|
575 | 579 | * less than its successor |
|
576 | 580 | */ |
|
577 | 581 | #define RELATIVE_DEDENT_PENALTY 23 |
|
578 | 582 | #define RELATIVE_DEDENT_WITH_BLANK_PENALTY 17 |
|
579 | 583 | |
|
580 | 584 | /* |
|
581 | 585 | * We only consider whether the sum of the effective indents for splits are |
|
582 | 586 | * less than (-1), equal to (0), or greater than (+1) each other. The resulting |
|
583 | 587 | * value is multiplied by the following weight and combined with the penalty to |
|
584 | 588 | * determine the better of two scores. |
|
585 | 589 | */ |
|
586 | 590 | #define INDENT_WEIGHT 60 |
|
587 | 591 | |
|
588 | 592 | /* |
|
589 | 593 | * Compute a badness score for the hypothetical split whose measurements are |
|
590 | 594 | * stored in m. The weight factors were determined empirically using the tools and |
|
591 | 595 | * corpus described in |
|
592 | 596 | * |
|
593 | 597 | * https://github.com/mhagger/diff-slider-tools |
|
594 | 598 | * |
|
595 | 599 | * Also see that project if you want to improve the weights based on, for example, |
|
596 | 600 | * a larger or more diverse corpus. |
|
597 | 601 | */ |
|
598 | 602 | static void score_add_split(const struct split_measurement *m, struct split_score *s) |
|
599 | 603 | { |
|
600 | 604 | /* |
|
601 | 605 | * A place to accumulate penalty factors (positive makes this index more |
|
602 | 606 | * favored): |
|
603 | 607 | */ |
|
604 | 608 | int post_blank, total_blank, indent, any_blanks; |
|
605 | 609 | |
|
606 | 610 | if (m->pre_indent == -1 && m->pre_blank == 0) |
|
607 | 611 | s->penalty += START_OF_FILE_PENALTY; |
|
608 | 612 | |
|
609 | 613 | if (m->end_of_file) |
|
610 | 614 | s->penalty += END_OF_FILE_PENALTY; |
|
611 | 615 | |
|
612 | 616 | /* |
|
613 | 617 | * Set post_blank to the number of blank lines following the split, |
|
614 | 618 | * including the line immediately after the split: |
|
615 | 619 | */ |
|
616 | 620 | post_blank = (m->indent == -1) ? 1 + m->post_blank : 0; |
|
617 | 621 | total_blank = m->pre_blank + post_blank; |
|
618 | 622 | |
|
619 | 623 | /* Penalties based on nearby blank lines: */ |
|
620 | 624 | s->penalty += TOTAL_BLANK_WEIGHT * total_blank; |
|
621 | 625 | s->penalty += POST_BLANK_WEIGHT * post_blank; |
|
622 | 626 | |
|
623 | 627 | if (m->indent != -1) |
|
624 | 628 | indent = m->indent; |
|
625 | 629 | else |
|
626 | 630 | indent = m->post_indent; |
|
627 | 631 | |
|
628 | 632 | any_blanks = (total_blank != 0); |
|
629 | 633 | |
|
630 | 634 | /* Note that the effective indent is -1 at the end of the file: */ |
|
631 | 635 | s->effective_indent += indent; |
|
632 | 636 | |
|
633 | 637 | if (indent == -1) { |
|
634 | 638 | /* No additional adjustments needed. */ |
|
635 | 639 | } else if (m->pre_indent == -1) { |
|
636 | 640 | /* No additional adjustments needed. */ |
|
637 | 641 | } else if (indent > m->pre_indent) { |
|
638 | 642 | /* |
|
639 | 643 | * The line is indented more than its predecessor. |
|
640 | 644 | */ |
|
641 | 645 | s->penalty += any_blanks ? |
|
642 | 646 | RELATIVE_INDENT_WITH_BLANK_PENALTY : |
|
643 | 647 | RELATIVE_INDENT_PENALTY; |
|
644 | 648 | } else if (indent == m->pre_indent) { |
|
645 | 649 | /* |
|
646 | 650 | * The line has the same indentation level as its predecessor. |
|
647 | 651 | * No additional adjustments needed. |
|
648 | 652 | */ |
|
649 | 653 | } else { |
|
650 | 654 | /* |
|
651 | 655 | * The line is indented less than its predecessor. It could be |
|
652 | 656 | * the block terminator of the previous block, but it could |
|
653 | 657 | * also be the start of a new block (e.g., an "else" block, or |
|
654 | 658 | * maybe the previous block didn't have a block terminator). |
|
655 | 659 | * Try to distinguish those cases based on what comes next: |
|
656 | 660 | */ |
|
657 | 661 | if (m->post_indent != -1 && m->post_indent > indent) { |
|
658 | 662 | /* |
|
659 | 663 | * The following line is indented more. So it is likely |
|
660 | 664 | * that this line is the start of a block. |
|
661 | 665 | */ |
|
662 | 666 | s->penalty += any_blanks ? |
|
663 | 667 | RELATIVE_OUTDENT_WITH_BLANK_PENALTY : |
|
664 | 668 | RELATIVE_OUTDENT_PENALTY; |
|
665 | 669 | } else { |
|
666 | 670 | /* |
|
667 | 671 | * That was probably the end of a block. |
|
668 | 672 | */ |
|
669 | 673 | s->penalty += any_blanks ? |
|
670 | 674 | RELATIVE_DEDENT_WITH_BLANK_PENALTY : |
|
671 | 675 | RELATIVE_DEDENT_PENALTY; |
|
672 | 676 | } |
|
673 | 677 | } |
|
674 | 678 | } |
|
675 | 679 | |
|
676 | 680 | static int score_cmp(struct split_score *s1, struct split_score *s2) |
|
677 | 681 | { |
|
678 | 682 | /* -1 if s1.effective_indent < s2->effective_indent, etc. */ |
|
679 | 683 | int cmp_indents = ((s1->effective_indent > s2->effective_indent) - |
|
680 | 684 | (s1->effective_indent < s2->effective_indent)); |
|
681 | 685 | |
|
682 | 686 | return INDENT_WEIGHT * cmp_indents + (s1->penalty - s2->penalty); |
|
683 | 687 | } |
|
684 | 688 | |
|
685 | 689 | /* |
|
686 | 690 | * Represent a group of changed lines in an xdfile_t (i.e., a contiguous group |
|
687 | 691 | * of lines that was inserted or deleted from the corresponding version of the |
|
688 | 692 | * file). We consider there to be such a group at the beginning of the file, at |
|
689 | 693 | * the end of the file, and between any two unchanged lines, though most such |
|
690 | 694 | * groups will usually be empty. |
|
691 | 695 | * |
|
692 | 696 | * If the first line in a group is equal to the line following the group, then |
|
693 | 697 | * the group can be slid down. Similarly, if the last line in a group is equal |
|
694 | 698 | * to the line preceding the group, then the group can be slid up. See |
|
695 | 699 | * group_slide_down() and group_slide_up(). |
|
696 | 700 | * |
|
697 | 701 | * Note that loops that are testing for changed lines in xdf->rchg do not need |
|
698 | 702 | * index bounding since the array is prepared with a zero at position -1 and N. |
|
699 | 703 | */ |
|
700 | 704 | struct xdlgroup { |
|
701 | 705 | /* |
|
702 | 706 | * The index of the first changed line in the group, or the index of |
|
703 | 707 | * the unchanged line above which the (empty) group is located. |
|
704 | 708 | */ |
|
705 | 709 | long start; |
|
706 | 710 | |
|
707 | 711 | /* |
|
708 | 712 | * The index of the first unchanged line after the group. For an empty |
|
709 | 713 | * group, end is equal to start. |
|
710 | 714 | */ |
|
711 | 715 | long end; |
|
712 | 716 | }; |
|
713 | 717 | |
|
714 | 718 | /* |
|
715 | 719 | * Initialize g to point at the first group in xdf. |
|
716 | 720 | */ |
|
717 | 721 | static void group_init(xdfile_t *xdf, struct xdlgroup *g) |
|
718 | 722 | { |
|
719 | 723 | g->start = g->end = 0; |
|
720 | 724 | while (xdf->rchg[g->end]) |
|
721 | 725 | g->end++; |
|
722 | 726 | } |
|
723 | 727 | |
|
724 | 728 | /* |
|
725 | 729 | * Move g to describe the next (possibly empty) group in xdf and return 0. If g |
|
726 | 730 | * is already at the end of the file, do nothing and return -1. |
|
727 | 731 | */ |
|
728 | 732 | static inline int group_next(xdfile_t *xdf, struct xdlgroup *g) |
|
729 | 733 | { |
|
730 | 734 | if (g->end == xdf->nrec) |
|
731 | 735 | return -1; |
|
732 | 736 | |
|
733 | 737 | g->start = g->end + 1; |
|
734 | 738 | for (g->end = g->start; xdf->rchg[g->end]; g->end++) |
|
735 | 739 | ; |
|
736 | 740 | |
|
737 | 741 | return 0; |
|
738 | 742 | } |
|
739 | 743 | |
|
740 | 744 | /* |
|
741 | 745 | * Move g to describe the previous (possibly empty) group in xdf and return 0. |
|
742 | 746 | * If g is already at the beginning of the file, do nothing and return -1. |
|
743 | 747 | */ |
|
744 | 748 | static inline int group_previous(xdfile_t *xdf, struct xdlgroup *g) |
|
745 | 749 | { |
|
746 | 750 | if (g->start == 0) |
|
747 | 751 | return -1; |
|
748 | 752 | |
|
749 | 753 | g->end = g->start - 1; |
|
750 | 754 | for (g->start = g->end; xdf->rchg[g->start - 1]; g->start--) |
|
751 | 755 | ; |
|
752 | 756 | |
|
753 | 757 | return 0; |
|
754 | 758 | } |
|
755 | 759 | |
|
756 | 760 | /* |
|
757 | 761 | * If g can be slid toward the end of the file, do so, and if it bumps into a |
|
758 | 762 | * following group, expand this group to include it. Return 0 on success or -1 |
|
759 | 763 | * if g cannot be slid down. |
|
760 | 764 | */ |
|
761 | 765 | static int group_slide_down(xdfile_t *xdf, struct xdlgroup *g, long flags) |
|
762 | 766 | { |
|
763 | 767 | if (g->end < xdf->nrec && |
|
764 | 768 | recs_match(xdf->recs[g->start], xdf->recs[g->end], flags)) { |
|
765 | 769 | xdf->rchg[g->start++] = 0; |
|
766 | 770 | xdf->rchg[g->end++] = 1; |
|
767 | 771 | |
|
768 | 772 | while (xdf->rchg[g->end]) |
|
769 | 773 | g->end++; |
|
770 | 774 | |
|
771 | 775 | return 0; |
|
772 | 776 | } else { |
|
773 | 777 | return -1; |
|
774 | 778 | } |
|
775 | 779 | } |
|
776 | 780 | |
|
777 | 781 | /* |
|
778 | 782 | * If g can be slid toward the beginning of the file, do so, and if it bumps |
|
779 | 783 | * into a previous group, expand this group to include it. Return 0 on success |
|
780 | 784 | * or -1 if g cannot be slid up. |
|
781 | 785 | */ |
|
782 | 786 | static int group_slide_up(xdfile_t *xdf, struct xdlgroup *g, long flags) |
|
783 | 787 | { |
|
784 | 788 | if (g->start > 0 && |
|
785 | 789 | recs_match(xdf->recs[g->start - 1], xdf->recs[g->end - 1], flags)) { |
|
786 | 790 | xdf->rchg[--g->start] = 1; |
|
787 | 791 | xdf->rchg[--g->end] = 0; |
|
788 | 792 | |
|
789 | 793 | while (xdf->rchg[g->start - 1]) |
|
790 | 794 | g->start--; |
|
791 | 795 | |
|
792 | 796 | return 0; |
|
793 | 797 | } else { |
|
794 | 798 | return -1; |
|
795 | 799 | } |
|
796 | 800 | } |
|
797 | 801 | |
|
/* Report an internal invariant violation and terminate the process. */
static void xdl_bug(const char *msg)
{
	/* Emit exactly "BUG: <msg>\n" on stderr, then abort with status 1. */
	fputs("BUG: ", stderr);
	fputs(msg, stderr);
	fputc('\n', stderr);
	exit(1);
}
|
803 | 807 | |
|
804 | 808 | /* |
|
805 | 809 | * For indentation heuristic, skip searching for better slide position after |
|
806 | 810 | * checking MAX_BORING lines without finding an improvement. This defends the |
|
807 | 811 | * indentation heuristic logic against pathological cases. The value is not |
|
808 | 812 | * picked scientifically but should be good enough. |
|
809 | 813 | */ |
|
810 | 814 | #define MAX_BORING 100 |
|
811 | 815 | |
|
/*
 * Move back and forward change groups for a consistent and pretty diff output.
 * This also helps in finding joinable change groups and reducing the diff
 * size.
 *
 * xdf is the file whose changes are being compacted; xdfo is the other file,
 * walked in lockstep so that matching groups stay synchronized. Returns 0.
 */
int xdl_change_compact(xdfile_t *xdf, xdfile_t *xdfo, long flags) {
	struct xdlgroup g, go;
	long earliest_end, end_matching_other;
	long groupsize;

	group_init(xdf, &g);
	group_init(xdfo, &go);

	while (1) {
		/* If the group is empty in the to-be-compacted file, skip it: */
		if (g.end == g.start)
			goto next;

		/*
		 * Now shift the change up and then down as far as possible in
		 * each direction. If it bumps into any other changes, merge
		 * them; repeat until the group size stops growing.
		 */
		do {
			groupsize = g.end - g.start;

			/*
			 * Keep track of the last "end" index that causes this
			 * group to align with a group of changed lines in the
			 * other file. -1 indicates that we haven't found such
			 * a match yet:
			 */
			end_matching_other = -1;

			/* Shift the group backward as much as possible: */
			while (!group_slide_up(xdf, &g, flags))
				if (group_previous(xdfo, &go))
					xdl_bug("group sync broken sliding up");

			/*
			 * This is the highest that this group can be shifted.
			 * Record its end index:
			 */
			earliest_end = g.end;

			if (go.end > go.start)
				end_matching_other = g.end;

			/* Now shift the group forward as far as possible: */
			while (1) {
				if (group_slide_down(xdf, &g, flags))
					break;
				if (group_next(xdfo, &go))
					xdl_bug("group sync broken sliding down");

				if (go.end > go.start)
					end_matching_other = g.end;
			}
		} while (groupsize != g.end - g.start);

		/*
		 * If the group can be shifted, then we can possibly use this
		 * freedom to produce a more intuitive diff.
		 *
		 * The group is currently shifted as far down as possible, so the
		 * heuristics below only have to handle upwards shifts.
		 */

		if (g.end == earliest_end) {
			/* no shifting was possible */
		} else if (end_matching_other != -1) {
			/*
			 * Move the possibly merged group of changes back to line
			 * up with the last group of changes from the other file
			 * that it can align with.
			 */
			while (go.end == go.start) {
				if (group_slide_up(xdf, &g, flags))
					xdl_bug("match disappeared");
				if (group_previous(xdfo, &go))
					xdl_bug("group sync broken sliding to match");
			}
		} else if (flags & XDF_INDENT_HEURISTIC) {
			/*
			 * Indent heuristic: a group of pure add/delete lines
			 * implies two splits, one between the end of the "before"
			 * context and the start of the group, and another between
			 * the end of the group and the beginning of the "after"
			 * context. Some splits are aesthetically better and some
			 * are worse. We compute a badness "score" for each split,
			 * and add the scores for the two splits to define a
			 * "score" for each position that the group can be shifted
			 * to. Then we pick the shift with the lowest score.
			 */
			long shift, best_shift = -1;
			struct split_score best_score;

			/*
			 * This is O(N * MAX_BORING) (N = shift-able lines).
			 * Even with MAX_BORING bounded to a small value, a
			 * large N could still make this loop take several
			 * times longer than the main diff algorithm. The
			 * "boring" value is to help cut down N to something
			 * like (MAX_BORING + groupsize).
			 *
			 * Scan from bottom to top. So we can exit the loop
			 * without compromising the assumption "for a same best
			 * score, pick the bottommost shift".
			 */
			int boring = 0;
			for (shift = g.end; shift >= earliest_end; shift--) {
				struct split_measurement m;
				struct split_score score = {0, 0};
				int cmp;

				/* Score both splits implied by placing the
				 * group's end at "shift". */
				measure_split(xdf, shift, &m);
				score_add_split(&m, &score);
				measure_split(xdf, shift - groupsize, &m);
				score_add_split(&m, &score);

				/* The first candidate always wins. */
				if (best_shift == -1) {
					cmp = -1;
				} else {
					cmp = score_cmp(&score, &best_score);
				}
				if (cmp < 0) {
					boring = 0;
					best_score.effective_indent = score.effective_indent;
					best_score.penalty = score.penalty;
					best_shift = shift;
				} else {
					/* No improvement; give up after
					 * MAX_BORING stale candidates. */
					boring += 1;
					if (boring >= MAX_BORING)
						break;
				}
			}

			/* Slide the group up to the chosen position. */
			while (g.end > best_shift) {
				if (group_slide_up(xdf, &g, flags))
					xdl_bug("best shift unreached");
				if (group_previous(xdfo, &go))
					xdl_bug("group sync broken sliding to blank line");
			}
		}

	next:
		/* Move past the just-processed group: */
		if (group_next(xdf, &g))
			break;
		if (group_next(xdfo, &go))
			xdl_bug("group sync broken moving to next group");
	}

	/* Both files must run out of groups at the same time. */
	if (!group_next(xdfo, &go))
		xdl_bug("group sync broken at end of file");

	return 0;
}
|
969 | 973 | |
|
970 | 974 | |
|
/*
 * Build the edit script (a linked list of xdchange_t) from the per-line
 * change flags (rchg) of both files. On success *xscr receives the head of
 * the list and 0 is returned; on allocation failure the partial script is
 * freed and -1 is returned.
 */
int xdl_build_script(xdfenv_t *xe, xdchange_t **xscr) {
	xdchange_t *cscr = NULL, *xch;
	char *rchg1 = xe->xdf1.rchg, *rchg2 = xe->xdf2.rchg;
	long i1, i2, l1, l2;

	/*
	 * Trivial. Collects "groups" of changes and creates an edit script.
	 *
	 * Scans both files backward in lockstep. NOTE(review): the inner
	 * loops index rchg[i - 1] down to i == 0, i.e. they read rchg[-1];
	 * this assumes the rchg buffers are padded with 0 sentinels on both
	 * ends — confirm against the allocation in xdl_prepare.
	 */
	for (i1 = xe->xdf1.nrec, i2 = xe->xdf2.nrec; i1 >= 0 || i2 >= 0; i1--, i2--)
		if (rchg1[i1 - 1] || rchg2[i2 - 1]) {
			/* l1/l2 mark the (exclusive) ends of the change runs;
			 * i1/i2 walk back to their starts. */
			for (l1 = i1; rchg1[i1 - 1]; i1--);
			for (l2 = i2; rchg2[i2 - 1]; i2--);

			/* Prepend the group; list ends up in file order. */
			if (!(xch = xdl_add_change(cscr, i1, i2, l1 - i1, l2 - i2))) {
				xdl_free_script(cscr);
				return -1;
			}
			cscr = xch;
		}

	*xscr = cscr;

	return 0;
}
|
995 | 999 | |
|
996 | 1000 | |
|
997 | 1001 | void xdl_free_script(xdchange_t *xscr) { |
|
998 | 1002 | xdchange_t *xch; |
|
999 | 1003 | |
|
1000 | 1004 | while ((xch = xscr) != NULL) { |
|
1001 | 1005 | xscr = xscr->next; |
|
1002 | 1006 | xdl_free(xch); |
|
1003 | 1007 | } |
|
1004 | 1008 | } |
|
1005 | 1009 | |
|
/*
 * Walk the edit script, coalesce changes into hunks via xdl_get_hunk(), and
 * invoke xecfg->hunk_func for each one. Returns 0 on success, -1 as soon as
 * hunk_func returns a negative value.
 */
static int xdl_call_hunk_func(xdfenv_t *xe, xdchange_t *xscr, xdemitcb_t *ecb,
			      xdemitconf_t const *xecfg)
{
	xdchange_t *xch, *xche;
	if ((xecfg->flags & XDL_EMIT_BDIFFHUNK) != 0) {
		/*
		 * BDIFFHUNK mode: instead of reporting the changed ranges,
		 * report the ranges BETWEEN hunks (i.e. the matching
		 * regions), finishing with a final range that extends to the
		 * end of both files.
		 */
		long i1 = 0, i2 = 0, n1 = xe->xdf1.nrec, n2 = xe->xdf2.nrec;
		for (xch = xscr; xch; xch = xche->next) {
			/* xdl_get_hunk advances xch and returns the last
			 * change merged into the current hunk. */
			xche = xdl_get_hunk(&xch, xecfg);
			if (!xch)
				break;
			/* Emit the matching region preceding this hunk,
			 * unless it is empty on both sides. */
			if (xch->i1 > i1 || xch->i2 > i2) {
				if (xecfg->hunk_func(i1, xch->i1, i2, xch->i2, ecb->priv) < 0)
					return -1;
			}
			/* Resume just past the end of the hunk. */
			i1 = xche->i1 + xche->chg1;
			i2 = xche->i2 + xche->chg2;
		}
		/* Trailing matching region up to EOF (possibly empty). */
		if (xecfg->hunk_func(i1, n1, i2, n2, ecb->priv) < 0)
			return -1;
	} else {
		/* Normal mode: report each hunk as (start, length) pairs for
		 * both files. */
		for (xch = xscr; xch; xch = xche->next) {
			xche = xdl_get_hunk(&xch, xecfg);
			if (!xch)
				break;
			if (xecfg->hunk_func(
				    xch->i1, xche->i1 + xche->chg1 - xch->i1,
				    xch->i2, xche->i2 + xche->chg2 - xch->i2,
				    ecb->priv) < 0)
				return -1;
		}
	}
	return 0;
}
|
1039 | 1043 | |
|
1040 | 1044 | static void xdl_mark_ignorable(xdchange_t *xscr, xdfenv_t *xe, long flags) |
|
1041 | 1045 | { |
|
1042 | 1046 | xdchange_t *xch; |
|
1043 | 1047 | |
|
1044 | 1048 | for (xch = xscr; xch; xch = xch->next) { |
|
1045 | 1049 | int ignore = 1; |
|
1046 | 1050 | xrecord_t **rec; |
|
1047 | 1051 | long i; |
|
1048 | 1052 | |
|
1049 | 1053 | rec = &xe->xdf1.recs[xch->i1]; |
|
1050 | 1054 | for (i = 0; i < xch->chg1 && ignore; i++) |
|
1051 | 1055 | ignore = xdl_blankline(rec[i]->ptr, rec[i]->size, flags); |
|
1052 | 1056 | |
|
1053 | 1057 | rec = &xe->xdf2.recs[xch->i2]; |
|
1054 | 1058 | for (i = 0; i < xch->chg2 && ignore; i++) |
|
1055 | 1059 | ignore = xdl_blankline(rec[i]->ptr, rec[i]->size, flags); |
|
1056 | 1060 | |
|
1057 | 1061 | xch->ignore = ignore; |
|
1058 | 1062 | } |
|
1059 | 1063 | } |
|
1060 | 1064 | |
|
1061 | 1065 | int xdl_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp, |
|
1062 | 1066 | xdemitconf_t const *xecfg, xdemitcb_t *ecb) { |
|
1063 | 1067 | xdchange_t *xscr; |
|
1064 | 1068 | xdfenv_t xe; |
|
1065 | 1069 | emit_func_t ef = xecfg->hunk_func ? xdl_call_hunk_func : xdl_emit_diff; |
|
1066 | 1070 | |
|
1067 | 1071 | if (xdl_do_diff(mf1, mf2, xpp, &xe) < 0) { |
|
1068 | 1072 | |
|
1069 | 1073 | return -1; |
|
1070 | 1074 | } |
|
1071 | 1075 | if (xdl_change_compact(&xe.xdf1, &xe.xdf2, xpp->flags) < 0 || |
|
1072 | 1076 | xdl_change_compact(&xe.xdf2, &xe.xdf1, xpp->flags) < 0 || |
|
1073 | 1077 | xdl_build_script(&xe, &xscr) < 0) { |
|
1074 | 1078 | |
|
1075 | 1079 | xdl_free_env(&xe); |
|
1076 | 1080 | return -1; |
|
1077 | 1081 | } |
|
1078 | 1082 | |
|
1079 | 1083 | if (xpp->flags & XDF_IGNORE_BLANK_LINES) |
|
1080 | 1084 | xdl_mark_ignorable(xscr, &xe, xpp->flags); |
|
1081 | 1085 | if (ef(&xe, xscr, ecb, xecfg) < 0) { |
|
1082 | 1086 | xdl_free_script(xscr); |
|
1083 | 1087 | xdl_free_env(&xe); |
|
1084 | 1088 | return -1; |
|
1085 | 1089 | } |
|
1086 | 1090 | xdl_free_script(xscr); |
|
1087 | 1091 | xdl_free_env(&xe); |
|
1088 | 1092 | |
|
1089 | 1093 | return 0; |
|
1090 | 1094 | } |
@@ -1,42 +1,41 | |||
|
1 | 1 | /* |
|
2 | 2 | * LibXDiff by Davide Libenzi ( File Differential Library ) |
|
3 | 3 | * Copyright (C) 2003 Davide Libenzi |
|
4 | 4 | * |
|
5 | 5 | * This library is free software; you can redistribute it and/or |
|
6 | 6 | * modify it under the terms of the GNU Lesser General Public |
|
7 | 7 | * License as published by the Free Software Foundation; either |
|
8 | 8 | * version 2.1 of the License, or (at your option) any later version. |
|
9 | 9 | * |
|
10 | 10 | * This library is distributed in the hope that it will be useful, |
|
11 | 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
12 | 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
|
13 | 13 | * Lesser General Public License for more details. |
|
14 | 14 | * |
|
15 | 15 | * You should have received a copy of the GNU Lesser General Public |
|
16 | 16 | * License along with this library; if not, see |
|
17 | 17 | * <http://www.gnu.org/licenses/>. |
|
18 | 18 | * |
|
19 | 19 | * Davide Libenzi <davidel@xmailserver.org> |
|
20 | 20 | * |
|
21 | 21 | */ |
|
22 | 22 | |
|
23 | 23 | #if !defined(XINCLUDE_H) |
|
24 | 24 | #define XINCLUDE_H |
|
25 | 25 | |
|
26 | 26 | #include <ctype.h> |
|
27 | 27 | #include <stdio.h> |
|
28 | 28 | #include <stdlib.h> |
|
29 | #include <unistd.h> | |
|
30 | 29 | #include <string.h> |
|
31 | 30 | #include <limits.h> |
|
32 | 31 | |
|
33 | 32 | #include "xmacros.h" |
|
34 | 33 | #include "xdiff.h" |
|
35 | 34 | #include "xtypes.h" |
|
36 | 35 | #include "xutils.h" |
|
37 | 36 | #include "xprepare.h" |
|
38 | 37 | #include "xdiffi.h" |
|
39 | 38 | #include "xemit.h" |
|
40 | 39 | |
|
41 | 40 | |
|
42 | 41 | #endif /* #if !defined(XINCLUDE_H) */ |
General Comments 0
You need to be logged in to leave comments.
Login now