/*
 bdiff.c - efficient binary diff extension for Mercurial

 Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>

 This software may be used and distributed according to the terms of
 the GNU General Public License, incorporated herein by reference.

 Based roughly on Python difflib
*/

#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include "bdiff.h"
#include "bitmanipulation.h"
#include "compat.h"
/* Hash implementation from diffutils */
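/* ROL(v, n) rotates v left by n bits; HASH(h, c) mixes byte c into the
   rolling hash h */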
#define ROL(v, n) ((v) << (n) | (v) >> (sizeof(v) * CHAR_BIT - (n)))
#define HASH(h, c) ((c) + ROL(h, 7))
struct pos {
int pos, len;
};
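
/* split buffer a (len bytes) into lines stored in *lr, followed by a
   sentinel entry; returns the number of lines (excluding the sentinel),
   or -1 if allocation fails */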
int bdiff_splitlines(const char *a, ssize_t len, struct bdiff_line **lr)
{
unsigned hash;
int i;
const char *p, *b = a;
const char *const plast = a + len - 1;
struct bdiff_line *l;
/* count the lines */
i = 1; /* extra line for sentinel */
for (p = a; p < plast; p++)
if (*p == '\n')
i++;
if (p == plast)
i++;
*lr = l = (struct bdiff_line *)calloc(i, sizeof(struct bdiff_line));
if (!l)
return -1;
/* build the line array and calculate hashes */
hash = 0;
for (p = a; p < plast; p++) {
hash = HASH(hash, *p);
if (*p == '\n') {
l->hash = hash;
hash = 0;
l->len = p - b + 1;
l->l = b;
l->n = INT_MAX;
l++;
b = p + 1;
}
}
if (p == plast) {
hash = HASH(hash, *p);
l->hash = hash;
l->len = p - b + 1;
l->l = b;
l->n = INT_MAX;
l++;
}
/* set up a sentinel */
l->hash = 0;
l->len = 0;
l->l = a + len;
return i - 1;
}

static inline int cmp(struct bdiff_line *a, struct bdiff_line *b)
{
return a->hash != b->hash || a->len != b->len ||
memcmp(a->l, b->l, a->len);
}
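
/* hash the lines of b into open-addressed buckets (each struct pos records
   the head of a chain of identical lines and its popularity), then tag every
   line of a and b with an equivalence class id (e) and a chain pointer (n);
   returns 0 on allocation failure, 1 on success */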
static int equatelines(struct bdiff_line *a, int an, struct bdiff_line *b,
int bn)
{
int i, j, buckets = 1, t, scale;
struct pos *h = NULL;
/* build a hash table of the next highest power of 2 */
while (buckets < bn + 1)
buckets *= 2;
/* try to allocate a large hash table to avoid collisions */
for (scale = 4; scale; scale /= 2) {
h = (struct pos *)calloc(buckets, scale * sizeof(struct pos));
if (h)
break;
}
if (!h)
return 0;
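	/* buckets is a power of two and scale is 4, 2 or 1, so the value
	   below also serves as the index mask for linear probing */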
buckets = buckets * scale - 1;
/* clear the hash table */
for (i = 0; i <= buckets; i++) {
h[i].pos = -1;
h[i].len = 0;
}
/* add lines to the hash table chains */
for (i = 0; i < bn; i++) {
/* find the equivalence class */
for (j = b[i].hash & buckets; h[j].pos != -1;
j = (j + 1) & buckets)
if (!cmp(b + i, b + h[j].pos))
break;
/* add to the head of the equivalence class */
b[i].n = h[j].pos;
b[i].e = j;
h[j].pos = i;
h[j].len++; /* keep track of popularity */
}
/* compute popularity threshold */
t = (bn >= 31000) ? bn / 1000 : 1000000 / (bn + 1);
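	/* lines of b occurring more than t times are treated as too popular
	   and are never used as match anchors below, which bounds worst-case
	   cost on inputs with many identical lines */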
/* match items in a to their equivalence class in b */
for (i = 0; i < an; i++) {
/* find the equivalence class */
for (j = a[i].hash & buckets; h[j].pos != -1;
j = (j + 1) & buckets)
if (!cmp(a + i, b + h[j].pos))
break;
a[i].e = j; /* use equivalence class for quick compare */
if (h[j].len <= t)
a[i].n = h[j].pos; /* point to head of match list */
else
a[i].n = -1; /* too popular */
}
/* discard hash tables */
free(h);
return 1;
}
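
/* find the longest run of matching lines between a[a1:a2] and b[b1:b2],
   preferring runs closer to the middle so the recursion stays balanced;
   pos[] remembers, for each line of b, a previously found match ending
   there so later matches can extend it; the start of the chosen run is
   returned through *omi and *omj, its length as the return value */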
static int longest_match(struct bdiff_line *a, struct bdiff_line *b,
struct pos *pos, int a1, int a2, int b1, int b2,
int *omi, int *omj)
{
int mi = a1, mj = b1, mk = 0, i, j, k, half, bhalf;
/* window our search on large regions to better bound
worst-case performance. by choosing a window at the end, we
reduce skipping overhead on the b chains. */
if (a2 - a1 > 30000)
a1 = a2 - 30000;
half = (a1 + a2 - 1) / 2;
bhalf = (b1 + b2 - 1) / 2;
for (i = a1; i < a2; i++) {
/* skip all lines in b after the current block */
for (j = a[i].n; j >= b2; j = b[j].n)
;
/* loop through all lines match a[i] in b */
for (; j >= b1; j = b[j].n) {
/* does this extend an earlier match? */
for (k = 1; j - k >= b1 && i - k >= a1; k++) {
/* reached an earlier match? */
if (pos[j - k].pos == i - k) {
k += pos[j - k].len;
break;
}
/* previous line mismatch? */
if (a[i - k].e != b[j - k].e)
break;
}
pos[j].pos = i;
pos[j].len = k;
/* best match so far? we prefer matches closer
to the middle to balance recursion */
if (k > mk) {
/* a longer match */
mi = i;
mj = j;
mk = k;
} else if (k == mk) {
if (i > mi && i <= half && j > b1) {
/* same match but closer to half */
mi = i;
mj = j;
} else if (i == mi && (mj > bhalf || i == a1)) {
/* same i but best earlier j */
mj = j;
}
}
}
}
if (mk) {
mi = mi - mk + 1;
mj = mj - mk + 1;
}
/* expand match to include subsequent popular lines */
while (mi + mk < a2 && mj + mk < b2 && a[mi + mk].e == b[mj + mk].e)
mk++;
*omi = mi;
*omj = mj;
return mk;
}
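
/* find the longest match in the region, append it as a hunk after l, then
   recurse on the part before it and iterate on the part after it; returns
   the new tail of the hunk list, or NULL if allocation fails */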
static struct bdiff_hunk *recurse(struct bdiff_line *a, struct bdiff_line *b,
struct pos *pos, int a1, int a2, int b1,
int b2, struct bdiff_hunk *l)
{
int i, j, k;
while (1) {
/* find the longest match in this chunk */
k = longest_match(a, b, pos, a1, a2, b1, b2, &i, &j);
if (!k)
return l;
/* and recurse on the remaining chunks on either side */
l = recurse(a, b, pos, a1, i, b1, j, l);
if (!l)
return NULL;
l->next =
(struct bdiff_hunk *)malloc(sizeof(struct bdiff_hunk));
if (!l->next)
return NULL;
l = l->next;
l->a1 = i;
l->a2 = i + k;
l->b1 = j;
l->b2 = j + k;
l->next = NULL;
/* tail-recursion didn't happen, so do equivalent iteration */
a1 = i + k;
b1 = j + k;
}
}
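
/* build the list of matching hunks between line arrays a (an lines) and
   b (bn lines), appended after base and terminated by a sentinel hunk at
   (an, bn); returns the number of hunks appended (including the sentinel),
   0 if the equivalence tables could not be allocated, or -1 if hunk
   allocation fails */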
int bdiff_diff(struct bdiff_line *a, int an, struct bdiff_line *b, int bn,
struct bdiff_hunk *base)
{
struct bdiff_hunk *curr;
struct pos *pos;
int t, count = 0;
/* allocate and fill arrays */
t = equatelines(a, an, b, bn);
pos = (struct pos *)calloc(bn ? bn : 1, sizeof(struct pos));
if (pos && t) {
/* generate the matching block list */
curr = recurse(a, b, pos, 0, an, 0, bn, base);
		if (!curr) {
			free(pos);
			return -1;
		}
/* sentinel end hunk */
curr->next =
(struct bdiff_hunk *)malloc(sizeof(struct bdiff_hunk));
		if (!curr->next) {
			free(pos);
			return -1;
		}
curr = curr->next;
curr->a1 = curr->a2 = an;
curr->b1 = curr->b2 = bn;
curr->next = NULL;
}
free(pos);
/* normalize the hunk list, try to push each hunk towards the end */
for (curr = base->next; curr; curr = curr->next) {
struct bdiff_hunk *next = curr->next;
if (!next)
break;
if (curr->a2 == next->a1 || curr->b2 == next->b1)
while (curr->a2 < an && curr->b2 < bn &&
next->a1 < next->a2 && next->b1 < next->b2 &&
!cmp(a + curr->a2, b + curr->b2)) {
curr->a2++;
next->a1++;
curr->b2++;
next->b1++;
}
}
for (curr = base->next; curr; curr = curr->next)
count++;
return count;
}

/* deallocate list of hunks; l may be NULL */
void bdiff_freehunks(struct bdiff_hunk *l)
{
struct bdiff_hunk *n;
for (; l; l = n) {
n = l->next;
free(l);
}
}