##// END OF EJS Templates
xdiff: fix a hard crash on Windows...
Matt Harbison -
r36941:d40b9e29 default
parent child Browse files
Show More
@@ -1,1130 +1,1130 b''
1 1 /*
2 2 * LibXDiff by Davide Libenzi ( File Differential Library )
3 3 * Copyright (C) 2003 Davide Libenzi
4 4 *
5 5 * This library is free software; you can redistribute it and/or
6 6 * modify it under the terms of the GNU Lesser General Public
7 7 * License as published by the Free Software Foundation; either
8 8 * version 2.1 of the License, or (at your option) any later version.
9 9 *
10 10 * This library is distributed in the hope that it will be useful,
11 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 13 * Lesser General Public License for more details.
14 14 *
15 15 * You should have received a copy of the GNU Lesser General Public
16 16 * License along with this library; if not, see
17 17 * <http://www.gnu.org/licenses/>.
18 18 *
19 19 * Davide Libenzi <davidel@xmailserver.org>
20 20 *
21 21 */
22 22
23 23 #include "xinclude.h"
24 24
25 25
26 26
27 27 #define XDL_MAX_COST_MIN 256
28 28 #define XDL_HEUR_MIN_COST 256
29 29 #define XDL_LINE_MAX (long)((1UL << (CHAR_BIT * sizeof(long) - 1)) - 1)
30 30 #define XDL_SNAKE_CNT 20
31 31 #define XDL_K_HEUR 4
32 32
33 33 /* VC 2008 doesn't know about the inline keyword. */
34 34 #if defined(_MSC_VER)
35 35 #define inline __forceinline
36 36 #endif
37 37
38 38
/*
 * Result of splitting a diff "box" (see xdl_split()): (i1, i2) is the
 * chosen split point, and min_lo/min_hi record whether the lower/upper
 * sub-box must still be solved with a minimal (exhaustive) search.
 */
typedef struct s_xdpsplit {
	int64_t i1, i2;
	int min_lo, min_hi;
} xdpsplit_t;
43 43
44 44
45 45
46 46
47 47 static int64_t xdl_split(uint64_t const *ha1, int64_t off1, int64_t lim1,
48 48 uint64_t const *ha2, int64_t off2, int64_t lim2,
49 49 int64_t *kvdf, int64_t *kvdb, int need_min, xdpsplit_t *spl,
50 50 xdalgoenv_t *xenv);
51 51 static xdchange_t *xdl_add_change(xdchange_t *xscr, int64_t i1, int64_t i2, int64_t chg1, int64_t chg2);
52 52
53 53
54 54
55 55
56 56
/*
 * See "An O(ND) Difference Algorithm and its Variations", by Eugene Myers.
 * Basically considers a "box" (off1, off2, lim1, lim2) and scans from both
 * the forward diagonal starting from (off1, off2) and the backward diagonal
 * starting from (lim1, lim2). If the K values on the same diagonal cross,
 * it returns the furthest point of reach. Running this algorithm in full
 * can be expensive in pathological cases, so a little bit of heuristic is
 * used to cut the search and return a suboptimal split point.
 *
 * kvdf/kvdb are the forward/backward furthest-reach vectors, indexed by
 * diagonal number (they may be indexed with negative diagonals; the caller
 * offsets the pointers accordingly). On return *spl holds the split point
 * and the return value is the edit cost (number of outer iterations) spent.
 * need_min disables all heuristic shortcuts and forces a minimal search.
 */
static int64_t xdl_split(uint64_t const *ha1, int64_t off1, int64_t lim1,
		 uint64_t const *ha2, int64_t off2, int64_t lim2,
		 int64_t *kvdf, int64_t *kvdb, int need_min, xdpsplit_t *spl,
		 xdalgoenv_t *xenv) {
	int64_t dmin = off1 - lim2, dmax = lim1 - off2;
	int64_t fmid = off1 - off2, bmid = lim1 - lim2;
	int64_t odd = (fmid - bmid) & 1;
	int64_t fmin = fmid, fmax = fmid;
	int64_t bmin = bmid, bmax = bmid;
	int64_t ec, d, i1, i2, prev1, best, dd, v, k;

	/*
	 * Set initial diagonal values for both forward and backward path.
	 */
	kvdf[fmid] = off1;
	kvdb[bmid] = lim1;

	for (ec = 1;; ec++) {
		int got_snake = 0;

		/*
		 * We need to extend the diagonal "domain" by one. If the next
		 * value exits the box boundaries we need to change it in the
		 * opposite direction because (max - min) must be a power of two.
		 * Also we initialize the external K value to -1 so that we can
		 * avoid extra condition checks inside the core loop.
		 */
		if (fmin > dmin)
			kvdf[--fmin - 1] = -1;
		else
			++fmin;
		if (fmax < dmax)
			kvdf[++fmax + 1] = -1;
		else
			--fmax;

		/* Forward sweep: extend each diagonal, following snakes. */
		for (d = fmax; d >= fmin; d -= 2) {
			if (kvdf[d - 1] >= kvdf[d + 1])
				i1 = kvdf[d - 1] + 1;
			else
				i1 = kvdf[d + 1];
			prev1 = i1;
			i2 = i1 - d;
			for (; i1 < lim1 && i2 < lim2 && ha1[i1] == ha2[i2]; i1++, i2++);
			if (i1 - prev1 > xenv->snake_cnt)
				got_snake = 1;
			kvdf[d] = i1;
			if (odd && bmin <= d && d <= bmax && kvdb[d] <= i1) {
				spl->i1 = i1;
				spl->i2 = i2;
				spl->min_lo = spl->min_hi = 1;
				return ec;
			}
		}

		/*
		 * We need to extend the diagonal "domain" by one. If the next
		 * value exits the box boundaries we need to change it in the
		 * opposite direction because (max - min) must be a power of two.
		 * Also we initialize the external K value to XDL_LINE_MAX
		 * (not -1: this is the backward path, reaches are minimized)
		 * so that we can avoid extra condition checks inside the core
		 * loop.
		 */
		if (bmin > dmin)
			kvdb[--bmin - 1] = XDL_LINE_MAX;
		else
			++bmin;
		if (bmax < dmax)
			kvdb[++bmax + 1] = XDL_LINE_MAX;
		else
			--bmax;

		/* Backward sweep: mirror image of the forward sweep above. */
		for (d = bmax; d >= bmin; d -= 2) {
			if (kvdb[d - 1] < kvdb[d + 1])
				i1 = kvdb[d - 1];
			else
				i1 = kvdb[d + 1] - 1;
			prev1 = i1;
			i2 = i1 - d;
			for (; i1 > off1 && i2 > off2 && ha1[i1 - 1] == ha2[i2 - 1]; i1--, i2--);
			if (prev1 - i1 > xenv->snake_cnt)
				got_snake = 1;
			kvdb[d] = i1;
			if (!odd && fmin <= d && d <= fmax && i1 <= kvdf[d]) {
				spl->i1 = i1;
				spl->i2 = i2;
				spl->min_lo = spl->min_hi = 1;
				return ec;
			}
		}

		if (need_min)
			continue;

		/*
		 * If the edit cost is above the heuristic trigger and if
		 * we got a good snake, we sample current diagonals to see
		 * if some of them have reached an "interesting" path. Our
		 * measure is a function of the distance from the diagonal
		 * corner (i1 + i2) penalized with the distance from the
		 * mid diagonal itself. If this value is above the current
		 * edit cost times a magic factor (XDL_K_HEUR) we consider
		 * it interesting.
		 */
		if (got_snake && ec > xenv->heur_min) {
			for (best = 0, d = fmax; d >= fmin; d -= 2) {
				dd = d > fmid ? d - fmid: fmid - d;
				i1 = kvdf[d];
				i2 = i1 - d;
				v = (i1 - off1) + (i2 - off2) - dd;

				if (v > XDL_K_HEUR * ec && v > best &&
				    off1 + xenv->snake_cnt <= i1 && i1 < lim1 &&
				    off2 + xenv->snake_cnt <= i2 && i2 < lim2) {
					/* Only accept if a full snake precedes the point. */
					for (k = 1; ha1[i1 - k] == ha2[i2 - k]; k++)
						if (k == xenv->snake_cnt) {
							best = v;
							spl->i1 = i1;
							spl->i2 = i2;
							break;
						}
				}
			}
			if (best > 0) {
				spl->min_lo = 1;
				spl->min_hi = 0;
				return ec;
			}

			for (best = 0, d = bmax; d >= bmin; d -= 2) {
				dd = d > bmid ? d - bmid: bmid - d;
				i1 = kvdb[d];
				i2 = i1 - d;
				v = (lim1 - i1) + (lim2 - i2) - dd;

				if (v > XDL_K_HEUR * ec && v > best &&
				    off1 < i1 && i1 <= lim1 - xenv->snake_cnt &&
				    off2 < i2 && i2 <= lim2 - xenv->snake_cnt) {
					/* Only accept if a full snake follows the point. */
					for (k = 0; ha1[i1 + k] == ha2[i2 + k]; k++)
						if (k == xenv->snake_cnt - 1) {
							best = v;
							spl->i1 = i1;
							spl->i2 = i2;
							break;
						}
				}
			}
			if (best > 0) {
				spl->min_lo = 0;
				spl->min_hi = 1;
				return ec;
			}
		}

		/*
		 * Enough is enough. We spent too much time here and now we collect
		 * the furthest reaching path using the (i1 + i2) measure.
		 */
		if (ec >= xenv->mxcost) {
			int64_t fbest, fbest1, bbest, bbest1;

			fbest = fbest1 = -1;
			for (d = fmax; d >= fmin; d -= 2) {
				i1 = XDL_MIN(kvdf[d], lim1);
				i2 = i1 - d;
				if (lim2 < i2)
					i1 = lim2 + d, i2 = lim2;
				if (fbest < i1 + i2) {
					fbest = i1 + i2;
					fbest1 = i1;
				}
			}

			bbest = bbest1 = XDL_LINE_MAX;
			for (d = bmax; d >= bmin; d -= 2) {
				i1 = XDL_MAX(off1, kvdb[d]);
				i2 = i1 - d;
				if (i2 < off2)
					i1 = off2 + d, i2 = off2;
				if (i1 + i2 < bbest) {
					bbest = i1 + i2;
					bbest1 = i1;
				}
			}

			/* Pick whichever direction made more progress. */
			if ((lim1 + lim2) - bbest < fbest - (off1 + off2)) {
				spl->i1 = fbest1;
				spl->i2 = fbest - fbest1;
				spl->min_lo = 1;
				spl->min_hi = 0;
			} else {
				spl->i1 = bbest1;
				spl->i2 = bbest - bbest1;
				spl->min_lo = 0;
				spl->min_hi = 1;
			}
			return ec;
		}
	}
}
265 265
266 266
/*
 * Rule: "Divide et Impera". Recursively split the box in sub-boxes by calling
 * the box splitting function. Note that the real job (marking changed lines)
 * is done in the two boundary reaching checks.
 *
 * Returns 0 on success, -1 if xdl_split() (or a recursive call) fails.
 */
int xdl_recs_cmp(diffdata_t *dd1, int64_t off1, int64_t lim1,
		 diffdata_t *dd2, int64_t off2, int64_t lim2,
		 int64_t *kvdf, int64_t *kvdb, int need_min, xdalgoenv_t *xenv) {
	uint64_t const *ha1 = dd1->ha, *ha2 = dd2->ha;

	/*
	 * Shrink the box by walking through each diagonal snake (SW and NE).
	 */
	for (; off1 < lim1 && off2 < lim2 && ha1[off1] == ha2[off2]; off1++, off2++);
	for (; off1 < lim1 && off2 < lim2 && ha1[lim1 - 1] == ha2[lim2 - 1]; lim1--, lim2--);

	/*
	 * If one dimension is empty, then all records on the other one must
	 * be obviously changed.
	 */
	if (off1 == lim1) {
		char *rchg2 = dd2->rchg;
		int64_t *rindex2 = dd2->rindex;

		for (; off2 < lim2; off2++)
			rchg2[rindex2[off2]] = 1;
	} else if (off2 == lim2) {
		char *rchg1 = dd1->rchg;
		int64_t *rindex1 = dd1->rindex;

		for (; off1 < lim1; off1++)
			rchg1[rindex1[off1]] = 1;
	} else {
		xdpsplit_t spl;
		spl.i1 = spl.i2 = 0;

		/*
		 * Divide ...
		 */
		if (xdl_split(ha1, off1, lim1, ha2, off2, lim2, kvdf, kvdb,
			      need_min, &spl, xenv) < 0) {

			return -1;
		}

		/*
		 * ... et Impera.
		 */
		if (xdl_recs_cmp(dd1, off1, spl.i1, dd2, off2, spl.i2,
				 kvdf, kvdb, spl.min_lo, xenv) < 0 ||
		    xdl_recs_cmp(dd1, spl.i1, lim1, dd2, spl.i2, lim2,
				 kvdf, kvdb, spl.min_hi, xenv) < 0) {

			return -1;
		}
	}

	return 0;
}
326 326
327 327
328 328 int xdl_do_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
329 329 xdfenv_t *xe) {
330 330 int64_t ndiags;
331 331 int64_t *kvd, *kvdf, *kvdb;
332 332 xdalgoenv_t xenv;
333 333 diffdata_t dd1, dd2;
334 334
335 335 if (xdl_prepare_env(mf1, mf2, xpp, xe) < 0) {
336 336
337 337 return -1;
338 338 }
339 339
340 340 /*
341 341 * Allocate and setup K vectors to be used by the differential algorithm.
342 342 * One is to store the forward path and one to store the backward path.
343 343 */
344 344 ndiags = xe->xdf1.nreff + xe->xdf2.nreff + 3;
345 if (!(kvd = (int64_t *) xdl_malloc((2 * ndiags + 2) * sizeof(long)))) {
345 if (!(kvd = (int64_t *) xdl_malloc((2 * ndiags + 2) * sizeof(int64_t)))) {
346 346
347 347 xdl_free_env(xe);
348 348 return -1;
349 349 }
350 350 kvdf = kvd;
351 351 kvdb = kvdf + ndiags;
352 352 kvdf += xe->xdf2.nreff + 1;
353 353 kvdb += xe->xdf2.nreff + 1;
354 354
355 355 xenv.mxcost = xdl_bogosqrt(ndiags);
356 356 if (xenv.mxcost < XDL_MAX_COST_MIN)
357 357 xenv.mxcost = XDL_MAX_COST_MIN;
358 358 xenv.snake_cnt = XDL_SNAKE_CNT;
359 359 xenv.heur_min = XDL_HEUR_MIN_COST;
360 360
361 361 dd1.nrec = xe->xdf1.nreff;
362 362 dd1.ha = xe->xdf1.ha;
363 363 dd1.rchg = xe->xdf1.rchg;
364 364 dd1.rindex = xe->xdf1.rindex;
365 365 dd2.nrec = xe->xdf2.nreff;
366 366 dd2.ha = xe->xdf2.ha;
367 367 dd2.rchg = xe->xdf2.rchg;
368 368 dd2.rindex = xe->xdf2.rindex;
369 369
370 370 if (xdl_recs_cmp(&dd1, 0, dd1.nrec, &dd2, 0, dd2.nrec,
371 371 kvdf, kvdb, (xpp->flags & XDF_NEED_MINIMAL) != 0, &xenv) < 0) {
372 372
373 373 xdl_free(kvd);
374 374 xdl_free_env(xe);
375 375 return -1;
376 376 }
377 377
378 378 xdl_free(kvd);
379 379
380 380 return 0;
381 381 }
382 382
383 383
384 384 static xdchange_t *xdl_add_change(xdchange_t *xscr, int64_t i1, int64_t i2, int64_t chg1, int64_t chg2) {
385 385 xdchange_t *xch;
386 386
387 387 if (!(xch = (xdchange_t *) xdl_malloc(sizeof(xdchange_t))))
388 388 return NULL;
389 389
390 390 xch->next = xscr;
391 391 xch->i1 = i1;
392 392 xch->i2 = i2;
393 393 xch->chg1 = chg1;
394 394 xch->chg2 = chg2;
395 395 xch->ignore = 0;
396 396
397 397 return xch;
398 398 }
399 399
400 400
401 401 static int recs_match(xrecord_t *rec1, xrecord_t *rec2)
402 402 {
403 403 return (rec1->ha == rec2->ha &&
404 404 xdl_recmatch(rec1->ptr, rec1->size,
405 405 rec2->ptr, rec2->size));
406 406 }
407 407
408 408 /*
409 409 * If a line is indented more than this, get_indent() just returns this value.
410 410 * This avoids having to do absurd amounts of work for data that are not
411 411 * human-readable text, and also ensures that the output of get_indent fits within
412 412 * an int.
413 413 */
414 414 #define MAX_INDENT 200
415 415
416 416 /*
417 417 * Return the amount of indentation of the specified line, treating TAB as 8
418 418 * columns. Return -1 if line is empty or contains only whitespace. Clamp the
419 419 * output value at MAX_INDENT.
420 420 */
421 421 static int get_indent(xrecord_t *rec)
422 422 {
423 423 int64_t i;
424 424 int ret = 0;
425 425
426 426 for (i = 0; i < rec->size; i++) {
427 427 char c = rec->ptr[i];
428 428
429 429 if (!XDL_ISSPACE(c))
430 430 return ret;
431 431 else if (c == ' ')
432 432 ret += 1;
433 433 else if (c == '\t')
434 434 ret += 8 - ret % 8;
435 435 /* ignore other whitespace characters */
436 436
437 437 if (ret >= MAX_INDENT)
438 438 return MAX_INDENT;
439 439 }
440 440
441 441 /* The line contains only whitespace. */
442 442 return -1;
443 443 }
444 444
445 445 /*
446 446 * If more than this number of consecutive blank rows are found, just return this
447 447 * value. This avoids requiring O(N^2) work for pathological cases, and also
448 448 * ensures that the output of score_split fits in an int.
449 449 */
450 450 #define MAX_BLANKS 20
451 451
/*
 * Characteristics measured about a hypothetical split position.
 * Filled in by measure_split() and consumed by score_add_split().
 */
struct split_measurement {
	/*
	 * Is the split at the end of the file (aside from any blank lines)?
	 */
	int end_of_file;

	/*
	 * How much is the line immediately following the split indented (or -1 if
	 * the line is blank):
	 */
	int indent;

	/*
	 * How many consecutive lines above the split are blank?
	 */
	int pre_blank;

	/*
	 * How much is the nearest non-blank line above the split indented (or -1
	 * if there is no such line)?
	 */
	int pre_indent;

	/*
	 * How many lines after the line following the split are blank?
	 */
	int post_blank;

	/*
	 * How much is the nearest non-blank line after the line following the
	 * split indented (or -1 if there is no such line)?
	 */
	int post_indent;
};
487 487
/*
 * Badness score for a hypothetical split position. Scores are only ever
 * compared against each other (see score_cmp()); absolute values carry no
 * meaning.
 */
struct split_score {
	/* The effective indent of this split (smaller is preferred). */
	int effective_indent;

	/* Penalty for this split (smaller is preferred). */
	int penalty;
};
495 495
/*
 * Fill m with information about a hypothetical split of xdf above line split.
 * split may equal xdf->nrec, meaning the split sits at the very end of the
 * file. Both scans are bounded by MAX_BLANKS to keep the cost O(1) per call.
 */
static void measure_split(const xdfile_t *xdf, int64_t split,
			  struct split_measurement *m)
{
	int64_t i;

	if (split >= xdf->nrec) {
		m->end_of_file = 1;
		m->indent = -1;
	} else {
		m->end_of_file = 0;
		m->indent = get_indent(xdf->recs[split]);
	}

	/* Walk upward to the nearest non-blank line above the split. */
	m->pre_blank = 0;
	m->pre_indent = -1;
	for (i = split - 1; i >= 0; i--) {
		m->pre_indent = get_indent(xdf->recs[i]);
		if (m->pre_indent != -1)
			break;
		m->pre_blank += 1;
		if (m->pre_blank == MAX_BLANKS) {
			m->pre_indent = 0;
			break;
		}
	}

	/* Walk downward past the split line to the nearest non-blank line. */
	m->post_blank = 0;
	m->post_indent = -1;
	for (i = split + 1; i < xdf->nrec; i++) {
		m->post_indent = get_indent(xdf->recs[i]);
		if (m->post_indent != -1)
			break;
		m->post_blank += 1;
		if (m->post_blank == MAX_BLANKS) {
			m->post_indent = 0;
			break;
		}
	}
}
538 538
539 539 /*
540 540 * The empirically-determined weight factors used by score_split() below.
541 541 * Larger values means that the position is a less favorable place to split.
542 542 *
543 543 * Note that scores are only ever compared against each other, so multiplying
544 544 * all of these weight/penalty values by the same factor wouldn't change the
545 545 * heuristic's behavior. Still, we need to set that arbitrary scale *somehow*.
546 546 * In practice, these numbers are chosen to be large enough that they can be
547 547 * adjusted relative to each other with sufficient precision despite using
548 548 * integer math.
549 549 */
550 550
551 551 /* Penalty if there are no non-blank lines before the split */
552 552 #define START_OF_FILE_PENALTY 1
553 553
554 554 /* Penalty if there are no non-blank lines after the split */
555 555 #define END_OF_FILE_PENALTY 21
556 556
557 557 /* Multiplier for the number of blank lines around the split */
558 558 #define TOTAL_BLANK_WEIGHT (-30)
559 559
560 560 /* Multiplier for the number of blank lines after the split */
561 561 #define POST_BLANK_WEIGHT 6
562 562
563 563 /*
564 564 * Penalties applied if the line is indented more than its predecessor
565 565 */
566 566 #define RELATIVE_INDENT_PENALTY (-4)
567 567 #define RELATIVE_INDENT_WITH_BLANK_PENALTY 10
568 568
569 569 /*
570 570 * Penalties applied if the line is indented less than both its predecessor and
571 571 * its successor
572 572 */
573 573 #define RELATIVE_OUTDENT_PENALTY 24
574 574 #define RELATIVE_OUTDENT_WITH_BLANK_PENALTY 17
575 575
576 576 /*
577 577 * Penalties applied if the line is indented less than its predecessor but not
578 578 * less than its successor
579 579 */
580 580 #define RELATIVE_DEDENT_PENALTY 23
581 581 #define RELATIVE_DEDENT_WITH_BLANK_PENALTY 17
582 582
583 583 /*
584 584 * We only consider whether the sum of the effective indents for splits are
585 585 * less than (-1), equal to (0), or greater than (+1) each other. The resulting
586 586 * value is multiplied by the following weight and combined with the penalty to
587 587 * determine the better of two scores.
588 588 */
589 589 #define INDENT_WEIGHT 60
590 590
/*
 * Compute a badness score for the hypothetical split whose measurements are
 * stored in m. The weight factors were determined empirically using the tools and
 * corpus described in
 *
 *     https://github.com/mhagger/diff-slider-tools
 *
 * Also see that project if you want to improve the weights based on, for example,
 * a larger or more diverse corpus.
 *
 * Note: this ACCUMULATES into *s — the caller must initialize s (typically
 * to {0, 0}) and may call this once per split to combine two splits' scores.
 */
static void score_add_split(const struct split_measurement *m, struct split_score *s)
{
	/*
	 * A place to accumulate penalty factors (positive makes this index more
	 * favored):
	 */
	int post_blank, total_blank, indent, any_blanks;

	if (m->pre_indent == -1 && m->pre_blank == 0)
		s->penalty += START_OF_FILE_PENALTY;

	if (m->end_of_file)
		s->penalty += END_OF_FILE_PENALTY;

	/*
	 * Set post_blank to the number of blank lines following the split,
	 * including the line immediately after the split:
	 */
	post_blank = (m->indent == -1) ? 1 + m->post_blank : 0;
	total_blank = m->pre_blank + post_blank;

	/* Penalties based on nearby blank lines: */
	s->penalty += TOTAL_BLANK_WEIGHT * total_blank;
	s->penalty += POST_BLANK_WEIGHT * post_blank;

	/* If the split line itself is blank, fall back to the next non-blank. */
	if (m->indent != -1)
		indent = m->indent;
	else
		indent = m->post_indent;

	any_blanks = (total_blank != 0);

	/* Note that the effective indent is -1 at the end of the file: */
	s->effective_indent += indent;

	if (indent == -1) {
		/* No additional adjustments needed. */
	} else if (m->pre_indent == -1) {
		/* No additional adjustments needed. */
	} else if (indent > m->pre_indent) {
		/*
		 * The line is indented more than its predecessor.
		 */
		s->penalty += any_blanks ?
			RELATIVE_INDENT_WITH_BLANK_PENALTY :
			RELATIVE_INDENT_PENALTY;
	} else if (indent == m->pre_indent) {
		/*
		 * The line has the same indentation level as its predecessor.
		 * No additional adjustments needed.
		 */
	} else {
		/*
		 * The line is indented less than its predecessor. It could be
		 * the block terminator of the previous block, but it could
		 * also be the start of a new block (e.g., an "else" block, or
		 * maybe the previous block didn't have a block terminator).
		 * Try to distinguish those cases based on what comes next:
		 */
		if (m->post_indent != -1 && m->post_indent > indent) {
			/*
			 * The following line is indented more. So it is likely
			 * that this line is the start of a block.
			 */
			s->penalty += any_blanks ?
				RELATIVE_OUTDENT_WITH_BLANK_PENALTY :
				RELATIVE_OUTDENT_PENALTY;
		} else {
			/*
			 * That was probably the end of a block.
			 */
			s->penalty += any_blanks ?
				RELATIVE_DEDENT_WITH_BLANK_PENALTY :
				RELATIVE_DEDENT_PENALTY;
		}
	}
}
678 678
679 679 static int score_cmp(struct split_score *s1, struct split_score *s2)
680 680 {
681 681 /* -1 if s1.effective_indent < s2->effective_indent, etc. */
682 682 int cmp_indents = ((s1->effective_indent > s2->effective_indent) -
683 683 (s1->effective_indent < s2->effective_indent));
684 684
685 685 return INDENT_WEIGHT * cmp_indents + (s1->penalty - s2->penalty);
686 686 }
687 687
/*
 * Represent a group of changed lines in an xdfile_t (i.e., a contiguous group
 * of lines that was inserted or deleted from the corresponding version of the
 * file). We consider there to be such a group at the beginning of the file, at
 * the end of the file, and between any two unchanged lines, though most such
 * groups will usually be empty.
 *
 * If the first line in a group is equal to the line following the group, then
 * the group can be slid down. Similarly, if the last line in a group is equal
 * to the line preceding the group, then the group can be slid up. See
 * group_slide_down() and group_slide_up().
 *
 * Note that loops that are testing for changed lines in xdf->rchg do not need
 * index bounding since the array is prepared with a zero at position -1 and N.
 */
struct xdlgroup {
	/*
	 * The index of the first changed line in the group, or the index of
	 * the unchanged line above which the (empty) group is located.
	 */
	int64_t start;

	/*
	 * The index of the first unchanged line after the group. For an empty
	 * group, end is equal to start. Invariant: start <= end.
	 */
	int64_t end;
};
716 716
717 717 /*
718 718 * Initialize g to point at the first group in xdf.
719 719 */
720 720 static void group_init(xdfile_t *xdf, struct xdlgroup *g)
721 721 {
722 722 g->start = g->end = 0;
723 723 while (xdf->rchg[g->end])
724 724 g->end++;
725 725 }
726 726
727 727 /*
728 728 * Move g to describe the next (possibly empty) group in xdf and return 0. If g
729 729 * is already at the end of the file, do nothing and return -1.
730 730 */
731 731 static inline int group_next(xdfile_t *xdf, struct xdlgroup *g)
732 732 {
733 733 if (g->end == xdf->nrec)
734 734 return -1;
735 735
736 736 g->start = g->end + 1;
737 737 for (g->end = g->start; xdf->rchg[g->end]; g->end++)
738 738 ;
739 739
740 740 return 0;
741 741 }
742 742
743 743 /*
744 744 * Move g to describe the previous (possibly empty) group in xdf and return 0.
745 745 * If g is already at the beginning of the file, do nothing and return -1.
746 746 */
747 747 static inline int group_previous(xdfile_t *xdf, struct xdlgroup *g)
748 748 {
749 749 if (g->start == 0)
750 750 return -1;
751 751
752 752 g->end = g->start - 1;
753 753 for (g->start = g->end; xdf->rchg[g->start - 1]; g->start--)
754 754 ;
755 755
756 756 return 0;
757 757 }
758 758
759 759 /*
760 760 * If g can be slid toward the end of the file, do so, and if it bumps into a
761 761 * following group, expand this group to include it. Return 0 on success or -1
762 762 * if g cannot be slid down.
763 763 */
764 764 static int group_slide_down(xdfile_t *xdf, struct xdlgroup *g)
765 765 {
766 766 if (g->end < xdf->nrec &&
767 767 recs_match(xdf->recs[g->start], xdf->recs[g->end])) {
768 768 xdf->rchg[g->start++] = 0;
769 769 xdf->rchg[g->end++] = 1;
770 770
771 771 while (xdf->rchg[g->end])
772 772 g->end++;
773 773
774 774 return 0;
775 775 } else {
776 776 return -1;
777 777 }
778 778 }
779 779
780 780 /*
781 781 * If g can be slid toward the beginning of the file, do so, and if it bumps
782 782 * into a previous group, expand this group to include it. Return 0 on success
783 783 * or -1 if g cannot be slid up.
784 784 */
785 785 static int group_slide_up(xdfile_t *xdf, struct xdlgroup *g)
786 786 {
787 787 if (g->start > 0 &&
788 788 recs_match(xdf->recs[g->start - 1], xdf->recs[g->end - 1])) {
789 789 xdf->rchg[--g->start] = 1;
790 790 xdf->rchg[--g->end] = 0;
791 791
792 792 while (xdf->rchg[g->start - 1])
793 793 g->start--;
794 794
795 795 return 0;
796 796 } else {
797 797 return -1;
798 798 }
799 799 }
800 800
/*
 * Report an internal inconsistency (broken invariant in the group-shifting
 * code) and terminate the process. This is never expected to fire.
 */
static void xdl_bug(const char *msg)
{
	fprintf(stderr, "BUG: %s\n", msg);
	exit(1);
}
806 806
807 807 /*
808 808 * For indentation heuristic, skip searching for better slide position after
809 809 * checking MAX_BORING lines without finding an improvement. This defends the
810 810 * indentation heuristic logic against pathological cases. The value is not
811 811 * picked scientifically but should be good enough.
812 812 */
813 813 #define MAX_BORING 100
814 814
/*
 * Move back and forward change groups for a consistent and pretty diff output.
 * This also helps in finding joinable change groups and reducing the diff
 * size.
 *
 * xdf is the file whose change groups are being shifted; xdfo is the other
 * side of the diff, walked in lockstep so each group in xdf stays paired
 * with the corresponding (possibly empty) group in xdfo. Any loss of that
 * pairing is a fatal internal error (xdl_bug). Always returns 0.
 */
int xdl_change_compact(xdfile_t *xdf, xdfile_t *xdfo, int64_t flags) {
	struct xdlgroup g, go;
	int64_t earliest_end, end_matching_other;
	int64_t groupsize;

	group_init(xdf, &g);
	group_init(xdfo, &go);

	while (1) {
		/* If the group is empty in the to-be-compacted file, skip it: */
		if (g.end == g.start)
			goto next;

		/*
		 * Now shift the change up and then down as far as possible in
		 * each direction. If it bumps into any other changes, merge them.
		 */
		do {
			groupsize = g.end - g.start;

			/*
			 * Keep track of the last "end" index that causes this
			 * group to align with a group of changed lines in the
			 * other file. -1 indicates that we haven't found such
			 * a match yet:
			 */
			end_matching_other = -1;

			/* Shift the group backward as much as possible: */
			while (!group_slide_up(xdf, &g))
				if (group_previous(xdfo, &go))
					xdl_bug("group sync broken sliding up");

			/*
			 * This is the highest that this group can be shifted.
			 * Record its end index:
			 */
			earliest_end = g.end;

			if (go.end > go.start)
				end_matching_other = g.end;

			/* Now shift the group forward as far as possible: */
			while (1) {
				if (group_slide_down(xdf, &g))
					break;
				if (group_next(xdfo, &go))
					xdl_bug("group sync broken sliding down");

				if (go.end > go.start)
					end_matching_other = g.end;
			}
		} while (groupsize != g.end - g.start);

		/*
		 * If the group can be shifted, then we can possibly use this
		 * freedom to produce a more intuitive diff.
		 *
		 * The group is currently shifted as far down as possible, so the
		 * heuristics below only have to handle upwards shifts.
		 */

		if (g.end == earliest_end) {
			/* no shifting was possible */
		} else if (end_matching_other != -1) {
			/*
			 * Move the possibly merged group of changes back to line
			 * up with the last group of changes from the other file
			 * that it can align with.
			 */
			while (go.end == go.start) {
				if (group_slide_up(xdf, &g))
					xdl_bug("match disappeared");
				if (group_previous(xdfo, &go))
					xdl_bug("group sync broken sliding to match");
			}
		} else if (flags & XDF_INDENT_HEURISTIC) {
			/*
			 * Indent heuristic: a group of pure add/delete lines
			 * implies two splits, one between the end of the "before"
			 * context and the start of the group, and another between
			 * the end of the group and the beginning of the "after"
			 * context. Some splits are aesthetically better and some
			 * are worse. We compute a badness "score" for each split,
			 * and add the scores for the two splits to define a
			 * "score" for each position that the group can be shifted
			 * to. Then we pick the shift with the lowest score.
			 */
			int64_t shift, best_shift = -1;
			struct split_score best_score;

			/*
			 * This is O(N * MAX_BLANKS) (N = shift-able lines).
			 * Even with MAX_BLANKS bounded to a small value, a
			 * large N could still make this loop take several
			 * times longer than the main diff algorithm. The
			 * "boring" value is to help cut down N to something
			 * like (MAX_BORING + groupsize).
			 *
			 * Scan from bottom to top. So we can exit the loop
			 * without compromising the assumption "for a same best
			 * score, pick the bottommost shift".
			 */
			int boring = 0;
			for (shift = g.end; shift >= earliest_end; shift--) {
				struct split_measurement m;
				struct split_score score = {0, 0};
				int cmp;

				/* Score both splits implied by shifting to "shift". */
				measure_split(xdf, shift, &m);
				score_add_split(&m, &score);
				measure_split(xdf, shift - groupsize, &m);
				score_add_split(&m, &score);

				if (best_shift == -1) {
					cmp = -1;
				} else {
					cmp = score_cmp(&score, &best_score);
				}
				if (cmp < 0) {
					boring = 0;
					best_score.effective_indent = score.effective_indent;
					best_score.penalty = score.penalty;
					best_shift = shift;
				} else {
					boring += 1;
					if (boring >= MAX_BORING)
						break;
				}
			}

			/* Slide the group back up to the chosen position. */
			while (g.end > best_shift) {
				if (group_slide_up(xdf, &g))
					xdl_bug("best shift unreached");
				if (group_previous(xdfo, &go))
					xdl_bug("group sync broken sliding to blank line");
			}
		}

	next:
		/* Move past the just-processed group: */
		if (group_next(xdf, &g))
			break;
		if (group_next(xdfo, &go))
			xdl_bug("group sync broken moving to next group");
	}

	/* Both files must run out of groups at the same time. */
	if (!group_next(xdfo, &go))
		xdl_bug("group sync broken at end of file");

	return 0;
}
972 972
973 973
/*
 * Convert the per-line change bitmaps (xdf1.rchg / xdf2.rchg) into a
 * linked edit script of xdchange_t records.
 *
 * Returns 0 on success, -1 on allocation failure (in which case any
 * partially built script is freed).
 */
int xdl_build_script(xdfenv_t *xe, xdchange_t **xscr) {
	xdchange_t *cscr = NULL, *xch;
	char *rchg1 = xe->xdf1.rchg, *rchg2 = xe->xdf2.rchg;
	int64_t i1, i2, l1, l2;

	/*
	 * Trivial. Collects "groups" of changes and creates an edit script.
	 *
	 * The scan runs bottom-up, so the list is built in file order.
	 * Index -1 is deliberately reachable: rchg points one byte past
	 * the start of its allocation (see xdl_prepare_ctx/xdl_free_ctx),
	 * so rchg[-1] is a zeroed sentinel that stops the inner scans.
	 * NOTE(review): the loop condition uses || — this presumably
	 * relies on i1 and i2 reaching -1 together because unchanged
	 * lines pair up one-to-one across the files; confirm against
	 * xdl_do_diff's invariants.
	 */
	for (i1 = xe->xdf1.nrec, i2 = xe->xdf2.nrec; i1 >= 0 || i2 >= 0; i1--, i2--)
		if (rchg1[i1 - 1] || rchg2[i2 - 1]) {
			/* Back up over the entire changed run on each side. */
			for (l1 = i1; rchg1[i1 - 1]; i1--);
			for (l2 = i2; rchg2[i2 - 1]; i2--);

			if (!(xch = xdl_add_change(cscr, i1, i2, l1 - i1, l2 - i2))) {
				xdl_free_script(cscr);
				return -1;
			}
			cscr = xch;
		}

	*xscr = cscr;

	return 0;
}
998 998
999 999
1000 1000 void xdl_free_script(xdchange_t *xscr) {
1001 1001 xdchange_t *xch;
1002 1002
1003 1003 while ((xch = xscr) != NULL) {
1004 1004 xscr = xscr->next;
1005 1005 xdl_free(xch);
1006 1006 }
1007 1007 }
1008 1008
1009 1009
/*
 * Starting at the passed change atom, find the latest change atom to be included
 * inside the differential hunk according to the specified configuration.
 * Also advance xscr if the first changes must be discarded.
 */
xdchange_t *xdl_get_hunk(xdchange_t **xscr)
{
	xdchange_t *xch, *xchp, *lxch;
	uint64_t ignored = 0; /* number of ignored blank lines */

	/* remove ignorable changes that are too far before other changes */
	for (xchp = *xscr; xchp && xchp->ignore; xchp = xchp->next) {
		xch = xchp->next;

		/*
		 * Drop a leading ignorable change when it is the last one,
		 * or when a gap separates it from the following change.
		 */
		if (xch == NULL ||
		    xch->i1 - (xchp->i1 + xchp->chg1) >= 0)
			*xscr = xch;
	}

	if (*xscr == NULL)
		return NULL;

	/* lxch tracks the last change that must be emitted in this hunk. */
	lxch = *xscr;

	for (xchp = *xscr, xch = xchp->next; xch; xchp = xch, xch = xch->next) {
		/* Gap (in file-1 lines) between consecutive changes;
		 * NOTE(review): a negative distance appears to mean the
		 * changes overlap/abut after ignore accounting — confirm. */
		int64_t distance = xch->i1 - (xchp->i1 + xchp->chg1);
		if (distance > 0)
			break;

		if (distance < 0 && (!xch->ignore || lxch == xchp)) {
			lxch = xch;
			ignored = 0;
		} else if (distance < 0 && xch->ignore) {
			ignored += xch->chg2;
		} else if (lxch != xchp &&
			   xch->i1 + ignored - (lxch->i1 + lxch->chg1) > 0) {
			/* Accumulated ignored lines opened a real gap: stop. */
			break;
		} else if (!xch->ignore) {
			lxch = xch;
			ignored = 0;
		} else {
			ignored += xch->chg2;
		}
	}

	return lxch;
}
1057 1057
1058 1058
/*
 * Walk the edit script and invoke xecfg->hunk_func once per hunk.
 *
 * The diff ran on trimmed inputs (see xdl_trim_files), so every line
 * index is shifted back by the trimmed prefix length p before being
 * reported; in BDIFFHUNK mode the final synthetic hunk also accounts
 * for the suffix length s.
 *
 * Returns 0 on success, -1 if no hunk_func is configured or if the
 * callback reports an error.
 */
static int xdl_call_hunk_func(xdfenv_t *xe, xdchange_t *xscr, xdemitcb_t *ecb,
			      xdemitconf_t const *xecfg)
{
	int64_t p = xe->nprefix, s = xe->nsuffix;
	xdchange_t *xch, *xche;

	if (!xecfg->hunk_func)
		return -1;

	if ((xecfg->flags & XDL_EMIT_BDIFFHUNK) != 0) {
		/*
		 * bdiff-style emission: report the *matching* blocks
		 * between changes, ending with a sentinel block that
		 * covers the common suffix.
		 */
		int64_t i1 = 0, i2 = 0, n1 = xe->xdf1.nrec, n2 = xe->xdf2.nrec;
		for (xch = xscr; xch; xch = xche->next) {
			xche = xdl_get_hunk(&xch);
			if (!xch)
				break;
			/* In this mode each hunk is a single change atom. */
			if (xch != xche)
				xdl_bug("xch != xche");
			xch->i1 += p;
			xch->i2 += p;
			if (xch->i1 > i1 || xch->i2 > i2) {
				if (xecfg->hunk_func(i1, xch->i1, i2, xch->i2, ecb->priv) < 0)
					return -1;
			}
			i1 = xche->i1 + xche->chg1;
			i2 = xche->i2 + xche->chg2;
		}
		if (xecfg->hunk_func(i1, n1 + p + s, i2, n2 + p + s,
				     ecb->priv) < 0)
			return -1;
	} else {
		/* Classic emission: report each changed hunk's extent. */
		for (xch = xscr; xch; xch = xche->next) {
			xche = xdl_get_hunk(&xch);
			if (!xch)
				break;
			if (xecfg->hunk_func(xch->i1 + p,
					     xche->i1 + xche->chg1 - xch->i1,
					     xch->i2 + p,
					     xche->i2 + xche->chg2 - xch->i2,
					     ecb->priv) < 0)
				return -1;
		}
	}
	return 0;
}
1103 1103
1104 1104 int xdl_diff(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
1105 1105 xdemitconf_t const *xecfg, xdemitcb_t *ecb) {
1106 1106 xdchange_t *xscr;
1107 1107 xdfenv_t xe;
1108 1108
1109 1109 if (xdl_do_diff(mf1, mf2, xpp, &xe) < 0) {
1110 1110
1111 1111 return -1;
1112 1112 }
1113 1113 if (xdl_change_compact(&xe.xdf1, &xe.xdf2, xpp->flags) < 0 ||
1114 1114 xdl_change_compact(&xe.xdf2, &xe.xdf1, xpp->flags) < 0 ||
1115 1115 xdl_build_script(&xe, &xscr) < 0) {
1116 1116
1117 1117 xdl_free_env(&xe);
1118 1118 return -1;
1119 1119 }
1120 1120
1121 1121 if (xdl_call_hunk_func(&xe, xscr, ecb, xecfg) < 0) {
1122 1122 xdl_free_script(xscr);
1123 1123 xdl_free_env(&xe);
1124 1124 return -1;
1125 1125 }
1126 1126 xdl_free_script(xscr);
1127 1127 xdl_free_env(&xe);
1128 1128
1129 1129 return 0;
1130 1130 }
@@ -1,552 +1,552 b''
1 1 /*
2 2 * LibXDiff by Davide Libenzi ( File Differential Library )
3 3 * Copyright (C) 2003 Davide Libenzi
4 4 *
5 5 * This library is free software; you can redistribute it and/or
6 6 * modify it under the terms of the GNU Lesser General Public
7 7 * License as published by the Free Software Foundation; either
8 8 * version 2.1 of the License, or (at your option) any later version.
9 9 *
10 10 * This library is distributed in the hope that it will be useful,
11 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 13 * Lesser General Public License for more details.
14 14 *
15 15 * You should have received a copy of the GNU Lesser General Public
16 16 * License along with this library; if not, see
17 17 * <http://www.gnu.org/licenses/>.
18 18 *
19 19 * Davide Libenzi <davidel@xmailserver.org>
20 20 *
21 21 */
22 22
23 23 #include "xinclude.h"
24 24
25 25
26 26 #define XDL_KPDIS_RUN 4
27 27 #define XDL_MAX_EQLIMIT 1024
28 28 #define XDL_SIMSCAN_WINDOW 100
29 29 #define XDL_GUESS_NLINES1 256
30 30
31 31
/*
 * One equivalence class of identical lines shared by both files.
 * Classes live in a hash table (chained via `next`) keyed by line hash.
 */
typedef struct s_xdlclass {
	struct s_xdlclass *next;	/* hash-bucket chain */
	uint64_t ha;			/* hash of the line content */
	char const *line;		/* representative line text */
	int64_t size;			/* representative line length */
	int64_t idx;			/* dense class index (order of discovery) */
	int64_t len1, len2;		/* occurrence counts in file 1 / file 2 */
} xdlclass_t;
40 40
/*
 * Classifier state used while mapping each record of both files onto
 * its line-equivalence class (see xdl_classify_record).
 */
typedef struct s_xdlclassifier {
	unsigned int hbits;	/* log2 of hash-table size */
	int64_t hsize;		/* number of hash buckets (1 << hbits) */
	xdlclass_t **rchash;	/* hash buckets of xdlclass_t chains */
	chastore_t ncha;	/* chunked allocator backing the classes */
	xdlclass_t **rcrecs;	/* class lookup by dense index */
	int64_t alloc;		/* capacity of rcrecs */
	int64_t count;		/* number of classes created so far */
	int64_t flags;		/* XDF_* flags in effect */
} xdlclassifier_t;
51 51
52 52
53 53
54 54
55 55 static int xdl_init_classifier(xdlclassifier_t *cf, int64_t size, int64_t flags);
56 56 static void xdl_free_classifier(xdlclassifier_t *cf);
57 57 static int xdl_classify_record(unsigned int pass, xdlclassifier_t *cf, xrecord_t **rhash,
58 58 unsigned int hbits, xrecord_t *rec);
59 59 static int xdl_prepare_ctx(unsigned int pass, mmfile_t *mf, int64_t narec,
60 60 xdlclassifier_t *cf, xdfile_t *xdf);
61 61 static void xdl_free_ctx(xdfile_t *xdf);
62 62 static int xdl_clean_mmatch(char const *dis, int64_t i, int64_t s, int64_t e);
63 63 static int xdl_cleanup_records(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xdf2);
64 64 static int xdl_trim_ends(xdfile_t *xdf1, xdfile_t *xdf2);
65 65 static int xdl_optimize_ctxs(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xdf2);
66 66
67 67
68 68
69 69
70 70 static int xdl_init_classifier(xdlclassifier_t *cf, int64_t size, int64_t flags) {
71 71 cf->flags = flags;
72 72
73 73 cf->hbits = xdl_hashbits(size);
74 74 cf->hsize = ((uint64_t)1) << cf->hbits;
75 75
76 76 if (xdl_cha_init(&cf->ncha, sizeof(xdlclass_t), size / 4 + 1) < 0) {
77 77
78 78 return -1;
79 79 }
80 80 if (!(cf->rchash = (xdlclass_t **) xdl_malloc(cf->hsize * sizeof(xdlclass_t *)))) {
81 81
82 82 xdl_cha_free(&cf->ncha);
83 83 return -1;
84 84 }
85 85 memset(cf->rchash, 0, cf->hsize * sizeof(xdlclass_t *));
86 86
87 87 cf->alloc = size;
88 88 if (!(cf->rcrecs = (xdlclass_t **) xdl_malloc(cf->alloc * sizeof(xdlclass_t *)))) {
89 89
90 90 xdl_free(cf->rchash);
91 91 xdl_cha_free(&cf->ncha);
92 92 return -1;
93 93 }
94 94
95 95 cf->count = 0;
96 96
97 97 return 0;
98 98 }
99 99
100 100
/*
 * Release everything owned by a classifier set up by
 * xdl_init_classifier(): the dense index array, the hash buckets and
 * the chunked allocator backing the class records themselves.
 */
static void xdl_free_classifier(xdlclassifier_t *cf) {

	xdl_free(cf->rcrecs);
	xdl_free(cf->rchash);
	xdl_cha_free(&cf->ncha);
}
107 107
108 108
109 109 static int xdl_classify_record(unsigned int pass, xdlclassifier_t *cf, xrecord_t **rhash,
110 110 unsigned int hbits, xrecord_t *rec) {
111 111 int64_t hi;
112 112 char const *line;
113 113 xdlclass_t *rcrec;
114 114 xdlclass_t **rcrecs;
115 115
116 116 line = rec->ptr;
117 117 hi = (long) XDL_HASHLONG(rec->ha, cf->hbits);
118 118 for (rcrec = cf->rchash[hi]; rcrec; rcrec = rcrec->next)
119 119 if (rcrec->ha == rec->ha &&
120 120 xdl_recmatch(rcrec->line, rcrec->size,
121 121 rec->ptr, rec->size))
122 122 break;
123 123
124 124 if (!rcrec) {
125 125 if (!(rcrec = xdl_cha_alloc(&cf->ncha))) {
126 126
127 127 return -1;
128 128 }
129 129 rcrec->idx = cf->count++;
130 130 if (cf->count > cf->alloc) {
131 131 cf->alloc *= 2;
132 132 if (!(rcrecs = (xdlclass_t **) xdl_realloc(cf->rcrecs, cf->alloc * sizeof(xdlclass_t *)))) {
133 133
134 134 return -1;
135 135 }
136 136 cf->rcrecs = rcrecs;
137 137 }
138 138 cf->rcrecs[rcrec->idx] = rcrec;
139 139 rcrec->line = line;
140 140 rcrec->size = rec->size;
141 141 rcrec->ha = rec->ha;
142 142 rcrec->len1 = rcrec->len2 = 0;
143 143 rcrec->next = cf->rchash[hi];
144 144 cf->rchash[hi] = rcrec;
145 145 }
146 146
147 147 (pass == 1) ? rcrec->len1++ : rcrec->len2++;
148 148
149 149 rec->ha = (unsigned long) rcrec->idx;
150 150
151 151 hi = (long) XDL_HASHLONG(rec->ha, hbits);
152 152 rec->next = rhash[hi];
153 153 rhash[hi] = rec;
154 154
155 155 return 0;
156 156 }
157 157
158 158
/*
 * Trim common prefix from files.
 *
 * Note: trimming could affect hunk shifting. But the performance benefit
 * outweighs the shift change. A diff result with suboptimal shifting is still
 * valid.
 *
 * Writes the number of trimmed prefix/suffix lines into xe->nprefix /
 * xe->nsuffix, and the trimmed views of mf1/mf2 into out_mf1/out_mf2.
 * `reserved` whole lines are deliberately kept on each end so that
 * later hunk shifting still has room to slide.
 */
static void xdl_trim_files(mmfile_t *mf1, mmfile_t *mf2, int64_t reserved,
		xdfenv_t *xe, mmfile_t *out_mf1, mmfile_t *out_mf2) {
	mmfile_t msmall, mlarge;
	/* prefix lines, prefix bytes, suffix lines, suffix bytes */
	int64_t plines = 0, pbytes = 0, slines = 0, sbytes = 0, i;
	/* prefix char pointer for msmall and mlarge */
	const char *pp1, *pp2;
	/* suffix char pointer for msmall and mlarge */
	const char *ps1, *ps2;

	/* reserved must >= 0 for the line boundary adjustment to work */
	if (reserved < 0)
		reserved = 0;

	/* Scan bounds come from the smaller file; copy order is irrelevant
	 * beyond picking which buffer limits the walk. */
	if (mf1->size < mf2->size) {
		memcpy(&msmall, mf1, sizeof(mmfile_t));
		memcpy(&mlarge, mf2, sizeof(mmfile_t));
	} else {
		memcpy(&msmall, mf2, sizeof(mmfile_t));
		memcpy(&mlarge, mf1, sizeof(mmfile_t));
	}

	/* Count matching prefix bytes, tracking how many full lines pass. */
	pp1 = msmall.ptr, pp2 = mlarge.ptr;
	for (i = 0; i < msmall.size && *pp1 == *pp2; ++i) {
		plines += (*pp1 == '\n');
		pp1++, pp2++;
	}

	/* Count matching suffix bytes; stops before overlapping the prefix. */
	ps1 = msmall.ptr + msmall.size - 1, ps2 = mlarge.ptr + mlarge.size - 1;
	while (ps1 > pp1 && *ps1 == *ps2) {
		slines += (*ps1 == '\n');
		ps1--, ps2--;
	}

	/* Retract common prefix and suffix boundaries for reserved lines */
	if (plines <= reserved + 1) {
		plines = 0;
	} else {
		/* Walk back over `reserved + 1` newlines to land on a
		 * line boundary. */
		i = 0;
		while (i <= reserved) {
			pp1--;
			i += (*pp1 == '\n');
		}
		/* The new mmfile starts at the next char just after '\n' */
		pbytes = pp1 - msmall.ptr + 1;
		plines -= reserved;
	}

	if (slines <= reserved + 1) {
		slines = 0;
	} else {
		/* Note: with compiler SIMD support (ex. -O3 -mavx2), this
		 * might perform better than memchr. */
		i = 0;
		while (i <= reserved) {
			ps1++;
			i += (*ps1 == '\n');
		}
		/* The new mmfile includes this '\n' */
		sbytes = msmall.ptr + msmall.size - ps1 - 1;
		slines -= reserved;
		/* A trailing '\n' was counted as a line boundary above but
		 * does not start a trimmed suffix line. */
		if (msmall.ptr[msmall.size - 1] == '\n')
			slines -= 1;
	}

	xe->nprefix = plines;
	xe->nsuffix = slines;
	out_mf1->ptr = mf1->ptr + pbytes;
	out_mf1->size = mf1->size - pbytes - sbytes;
	out_mf2->ptr = mf2->ptr + pbytes;
	out_mf2->size = mf2->size - pbytes - sbytes;
}
238 238
239 239
/*
 * Build the per-file diff context: split the mmap'ed buffer into line
 * records, hash and classify each line, and allocate the work arrays
 * used by the diff core (change bitmap, record index, class hashes).
 *
 * `narec` is an initial capacity estimate; the record array grows as
 * needed.  Returns 0 on success, -1 on failure with everything freed.
 */
static int xdl_prepare_ctx(unsigned int pass, mmfile_t *mf, int64_t narec,
			   xdlclassifier_t *cf, xdfile_t *xdf) {
	unsigned int hbits;
	int64_t nrec, hsize, bsize;
	uint64_t hav;
	char const *blk, *cur, *top, *prev;
	xrecord_t *crec;
	xrecord_t **recs, **rrecs;
	xrecord_t **rhash;
	uint64_t *ha;
	char *rchg;
	int64_t *rindex;

	/* NULL everything up front so the abort path can free blindly. */
	ha = NULL;
	rindex = NULL;
	rchg = NULL;
	rhash = NULL;
	recs = NULL;

	if (xdl_cha_init(&xdf->rcha, sizeof(xrecord_t), narec / 4 + 1) < 0)
		goto abort;
	if (!(recs = (xrecord_t **) xdl_malloc(narec * sizeof(xrecord_t *))))
		goto abort;

	{
		hbits = xdl_hashbits(narec);
		hsize = ((uint64_t)1) << hbits;
		if (!(rhash = (xrecord_t **) xdl_malloc(hsize * sizeof(xrecord_t *))))
			goto abort;
		memset(rhash, 0, hsize * sizeof(xrecord_t *));
	}

	/* Walk the buffer line by line; xdl_hash_record advances `cur`
	 * past the line it hashes. */
	nrec = 0;
	if ((cur = blk = xdl_mmfile_first(mf, &bsize)) != NULL) {
		for (top = blk + bsize; cur < top; ) {
			prev = cur;
			hav = xdl_hash_record(&cur, top);
			if (nrec >= narec) {
				narec *= 2;
				if (!(rrecs = (xrecord_t **) xdl_realloc(recs, narec * sizeof(xrecord_t *))))
					goto abort;
				recs = rrecs;
			}
			if (!(crec = xdl_cha_alloc(&xdf->rcha)))
				goto abort;
			crec->ptr = prev;
			crec->size = (long) (cur - prev);
			crec->ha = hav;
			recs[nrec++] = crec;

			if (xdl_classify_record(pass, cf, rhash, hbits, crec) < 0)
				goto abort;
		}
	}

	/* +2: one zeroed sentinel byte on each side of the bitmap; the
	 * diff core indexes rchg[-1] and rchg[nrec]. */
	if (!(rchg = (char *) xdl_malloc((nrec + 2) * sizeof(char))))
		goto abort;
	memset(rchg, 0, (nrec + 2) * sizeof(char));

	/* sizeof(int64_t)/sizeof(uint64_t), NOT sizeof(long): long is
	 * 32-bit on LLP64 (64-bit Windows), and under-allocating these
	 * arrays caused a hard crash there. */
	if (!(rindex = (int64_t *) xdl_malloc((nrec + 1) * sizeof(int64_t))))
		goto abort;
	if (!(ha = (uint64_t *) xdl_malloc((nrec + 1) * sizeof(uint64_t))))
		goto abort;

	xdf->nrec = nrec;
	xdf->recs = recs;
	xdf->hbits = hbits;
	xdf->rhash = rhash;
	xdf->rchg = rchg + 1;	/* expose the bitmap with its [-1] sentinel */
	xdf->rindex = rindex;
	xdf->nreff = 0;
	xdf->ha = ha;
	xdf->dstart = 0;
	xdf->dend = nrec - 1;

	return 0;

abort:
	xdl_free(ha);
	xdl_free(rindex);
	xdl_free(rchg);
	xdl_free(rhash);
	xdl_free(recs);
	xdl_cha_free(&xdf->rcha);
	return -1;
}
326 326
327 327
/*
 * Release all per-file buffers allocated by xdl_prepare_ctx().
 */
static void xdl_free_ctx(xdfile_t *xdf) {

	xdl_free(xdf->rhash);
	xdl_free(xdf->rindex);
	/* rchg points one byte into its allocation ([-1] sentinel). */
	xdl_free(xdf->rchg - 1);
	xdl_free(xdf->ha);
	xdl_free(xdf->recs);
	xdl_cha_free(&xdf->rcha);
}
337 337
338 338 /* Reserved lines for trimming, to leave room for shifting */
339 339 #define TRIM_RESERVED_LINES 100
340 340
341 341 int xdl_prepare_env(mmfile_t *mf1, mmfile_t *mf2, xpparam_t const *xpp,
342 342 xdfenv_t *xe) {
343 343 int64_t enl1, enl2, sample;
344 344 mmfile_t tmf1, tmf2;
345 345 xdlclassifier_t cf;
346 346
347 347 memset(&cf, 0, sizeof(cf));
348 348
349 349 sample = XDL_GUESS_NLINES1;
350 350
351 351 enl1 = xdl_guess_lines(mf1, sample) + 1;
352 352 enl2 = xdl_guess_lines(mf2, sample) + 1;
353 353
354 354 if (xdl_init_classifier(&cf, enl1 + enl2 + 1, xpp->flags) < 0)
355 355 return -1;
356 356
357 357 xdl_trim_files(mf1, mf2, TRIM_RESERVED_LINES, xe, &tmf1, &tmf2);
358 358
359 359 if (xdl_prepare_ctx(1, &tmf1, enl1, &cf, &xe->xdf1) < 0) {
360 360
361 361 xdl_free_classifier(&cf);
362 362 return -1;
363 363 }
364 364 if (xdl_prepare_ctx(2, &tmf2, enl2, &cf, &xe->xdf2) < 0) {
365 365
366 366 xdl_free_ctx(&xe->xdf1);
367 367 xdl_free_classifier(&cf);
368 368 return -1;
369 369 }
370 370
371 371 if (xdl_optimize_ctxs(&cf, &xe->xdf1, &xe->xdf2) < 0) {
372 372 xdl_free_ctx(&xe->xdf2);
373 373 xdl_free_ctx(&xe->xdf1);
374 374 xdl_free_classifier(&cf);
375 375 return -1;
376 376 }
377 377
378 378 xdl_free_classifier(&cf);
379 379
380 380 return 0;
381 381 }
382 382
383 383
/*
 * Release both per-file contexts of a prepared diff environment
 * (reverse order of xdl_prepare_env's setup).
 */
void xdl_free_env(xdfenv_t *xe) {

	xdl_free_ctx(&xe->xdf2);
	xdl_free_ctx(&xe->xdf1);
}
389 389
390 390
/*
 * Decide whether a multimatch line (dis[i] == 2) should be discarded:
 * it is discardable only when it sits inside runs that contain enough
 * nomatch lines (dis[j] == 0) on both sides.  Returns non-zero to
 * discard.
 */
static int xdl_clean_mmatch(char const *dis, int64_t i, int64_t s, int64_t e) {
	int64_t r, rdis0, rpdis0, rdis1, rpdis1;

	/*
	 * Limits the window that is examined during the similar-lines
	 * scan. The loops below stop when dis[i - r] == 1 (line that
	 * has no match), but there are corner cases where the loop
	 * proceeds all the way to the extremities, causing huge
	 * performance penalties in case of big files.
	 */
	if (i - s > XDL_SIMSCAN_WINDOW)
		s = i - XDL_SIMSCAN_WINDOW;
	if (e - i > XDL_SIMSCAN_WINDOW)
		e = i + XDL_SIMSCAN_WINDOW;

	/*
	 * Scans the lines before 'i' to find a run of lines that either
	 * have no match (dis[j] == 0) or have multiple matches (dis[j] > 1).
	 * Note that we always call this function with dis[i] > 1, so the
	 * current line (i) is already a multimatch line.
	 */
	for (r = 1, rdis0 = 0, rpdis0 = 1; (i - r) >= s; r++) {
		if (!dis[i - r])
			rdis0++;
		else if (dis[i - r] == 2)
			rpdis0++;
		else
			break;
	}
	/*
	 * If the run before the line 'i' found only multimatch lines, we
	 * return 0 and hence we don't make the current line (i) discarded.
	 * We want to discard multimatch lines only when they appear in the
	 * middle of runs with nomatch lines (dis[j] == 0).
	 */
	if (rdis0 == 0)
		return 0;
	for (r = 1, rdis1 = 0, rpdis1 = 1; (i + r) <= e; r++) {
		if (!dis[i + r])
			rdis1++;
		else if (dis[i + r] == 2)
			rpdis1++;
		else
			break;
	}
	/*
	 * If the run after the line 'i' found only multimatch lines, we
	 * return 0 and hence we don't make the current line (i) discarded.
	 */
	if (rdis1 == 0)
		return 0;
	rdis1 += rdis0;
	rpdis1 += rpdis0;

	/* Discard only when multimatch lines are a small enough fraction
	 * of the surrounding run (threshold set by XDL_KPDIS_RUN). */
	return rpdis1 * XDL_KPDIS_RUN < (rpdis1 + rdis1);
}
447 447
448 448
/*
 * Try to reduce the problem complexity, discard records that have no
 * matches on the other file. Also, lines that have multiple matches
 * might be potentially discarded if they appear in a run of discardable.
 *
 * Surviving records are compacted into rindex[]/ha[] (count in nreff);
 * discarded records are pre-marked changed in rchg[].
 */
static int xdl_cleanup_records(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xdf2) {
	int64_t i, nm, nreff, mlim;
	xrecord_t **recs;
	xdlclass_t *rcrec;
	char *dis, *dis1, *dis2;

	/* One scratch byte per record of both files (plus separators),
	 * holding 0 = no match, 1 = few matches, 2 = many matches. */
	if (!(dis = (char *) xdl_malloc(xdf1->nrec + xdf2->nrec + 2))) {

		return -1;
	}
	memset(dis, 0, xdf1->nrec + xdf2->nrec + 2);
	dis1 = dis;
	dis2 = dis1 + xdf1->nrec + 1;

	/* "Many matches" threshold scales with sqrt(nrec), capped. */
	if ((mlim = xdl_bogosqrt(xdf1->nrec)) > XDL_MAX_EQLIMIT)
		mlim = XDL_MAX_EQLIMIT;
	for (i = xdf1->dstart, recs = &xdf1->recs[xdf1->dstart]; i <= xdf1->dend; i++, recs++) {
		rcrec = cf->rcrecs[(*recs)->ha];
		nm = rcrec ? rcrec->len2 : 0;	/* matches on the other side */
		dis1[i] = (nm == 0) ? 0: (nm >= mlim) ? 2: 1;
	}

	if ((mlim = xdl_bogosqrt(xdf2->nrec)) > XDL_MAX_EQLIMIT)
		mlim = XDL_MAX_EQLIMIT;
	for (i = xdf2->dstart, recs = &xdf2->recs[xdf2->dstart]; i <= xdf2->dend; i++, recs++) {
		rcrec = cf->rcrecs[(*recs)->ha];
		nm = rcrec ? rcrec->len1 : 0;
		dis2[i] = (nm == 0) ? 0: (nm >= mlim) ? 2: 1;
	}

	/* Keep a record if it has few matches, or if it is a multimatch
	 * line that xdl_clean_mmatch declines to discard. */
	for (nreff = 0, i = xdf1->dstart, recs = &xdf1->recs[xdf1->dstart];
	     i <= xdf1->dend; i++, recs++) {
		if (dis1[i] == 1 ||
		    (dis1[i] == 2 && !xdl_clean_mmatch(dis1, i, xdf1->dstart, xdf1->dend))) {
			xdf1->rindex[nreff] = i;
			xdf1->ha[nreff] = (*recs)->ha;
			nreff++;
		} else
			xdf1->rchg[i] = 1;
	}
	xdf1->nreff = nreff;

	for (nreff = 0, i = xdf2->dstart, recs = &xdf2->recs[xdf2->dstart];
	     i <= xdf2->dend; i++, recs++) {
		if (dis2[i] == 1 ||
		    (dis2[i] == 2 && !xdl_clean_mmatch(dis2, i, xdf2->dstart, xdf2->dend))) {
			xdf2->rindex[nreff] = i;
			xdf2->ha[nreff] = (*recs)->ha;
			nreff++;
		} else
			xdf2->rchg[i] = 1;
	}
	xdf2->nreff = nreff;

	xdl_free(dis);

	return 0;
}
512 512
513 513
514 514 /*
515 515 * Early trim initial and terminal matching records.
516 516 */
517 517 static int xdl_trim_ends(xdfile_t *xdf1, xdfile_t *xdf2) {
518 518 int64_t i, lim;
519 519 xrecord_t **recs1, **recs2;
520 520
521 521 recs1 = xdf1->recs;
522 522 recs2 = xdf2->recs;
523 523 for (i = 0, lim = XDL_MIN(xdf1->nrec, xdf2->nrec); i < lim;
524 524 i++, recs1++, recs2++)
525 525 if ((*recs1)->ha != (*recs2)->ha)
526 526 break;
527 527
528 528 xdf1->dstart = xdf2->dstart = i;
529 529
530 530 recs1 = xdf1->recs + xdf1->nrec - 1;
531 531 recs2 = xdf2->recs + xdf2->nrec - 1;
532 532 for (lim -= i, i = 0; i < lim; i++, recs1--, recs2--)
533 533 if ((*recs1)->ha != (*recs2)->ha)
534 534 break;
535 535
536 536 xdf1->dend = xdf1->nrec - i - 1;
537 537 xdf2->dend = xdf2->nrec - i - 1;
538 538
539 539 return 0;
540 540 }
541 541
542 542
543 543 static int xdl_optimize_ctxs(xdlclassifier_t *cf, xdfile_t *xdf1, xdfile_t *xdf2) {
544 544
545 545 if (xdl_trim_ends(xdf1, xdf2) < 0 ||
546 546 xdl_cleanup_records(cf, xdf1, xdf2) < 0) {
547 547
548 548 return -1;
549 549 }
550 550
551 551 return 0;
552 552 }
General Comments 0
You need to be logged in to leave comments. Login now