# mpatch.py - Python implementation of mpatch.c
#
# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from __future__ import absolute_import

import struct

from . import policy, pycompat

stringio = pycompat.stringio
modulepolicy = policy.policy
policynocffi = policy.policynocffi

class mpatchError(Exception):
"""error raised when a delta cannot be decoded
"""

# This attempts to apply a series of patches in time proportional to
# the total size of the patches, rather than patches * len(text). This
# means rather than shuffling strings around, we shuffle around
# pointers to fragments with fragment lists.
#
# When the fragment lists get too long, we collapse them. To do this
# efficiently, we do all our operations inside a buffer created by
# mmap and simply use memmove. This avoids creating a bunch of large
# temporary string buffers.
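#
# A fragment is a (length, offset) pair describing a span of bytes in that
# buffer. For example, an 11-byte source text stored at offset 0 starts out
# as the single-entry list [(11, 0)]; a hunk replacing its first five bytes
# leaves [(6, 5)] plus a new fragment pointing at the replacement data
# copied into the patch area.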

def _pull(dst, src, l): # pull l bytes from src
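    """pop fragments totalling l bytes off the end of src and append them
    to dst

    Both lists hold (length, offset) pairs and src is kept in reverse text
    order, so fragments are taken from its end; the last fragment pulled is
    split when l ends inside it. Illustrative example:
    _pull(dst, [(11, 0)], 4) appends (4, 0) to dst and leaves src as [(7, 4)].
    """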
while l:
f = src.pop()
if f[0] > l: # do we need to split?
src.append((f[0] - l, f[1] + l))
dst.append((l, f[1]))
return
dst.append(f)
l -= f[0]

def _move(m, dest, src, count):
"""move count bytes from src to dest
The file pointer is left at the end of dest.
"""
m.seek(src)
buf = m.read(count)
m.seek(dest)
m.write(buf)

def _collect(m, buf, list):
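    """write the fragments of list contiguously into m starting at offset
    buf and return the result as a single (length, offset) fragment
    """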
start = buf
for l, p in reversed(list):
_move(m, buf, p, l)
buf += l
return (buf - start, start)

def patches(a, bins):
if not bins:
return a
plens = [len(x) for x in bins]
pl = sum(plens)
bl = len(a) + pl
tl = bl + bl + pl # enough for the patches and two working texts
b1, b2 = 0, bl
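    # b1 and b2 are the offsets of the two bl-byte working text areas; the
    # concatenated patches are copied in after them, starting at 2 * bl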
if not tl:
return a
m = stringio()
# load our original text
m.write(a)
frags = [(len(a), b1)]
# copy all the patches into our segment so we can memmove from them
pos = b2 + bl
m.seek(pos)
    for p in bins:
        m.write(p)
for plen in plens:
# if our list gets too long, execute it
if len(frags) > 128:
b2, b1 = b1, b2
frags = [_collect(m, b1, frags)]
new = []
end = pos + plen
last = 0
while pos < end:
m.seek(pos)
try:
p1, p2, l = struct.unpack(">lll", m.read(12))
except struct.error:
raise mpatchError("patch cannot be decoded")
_pull(new, frags, p1 - last) # what didn't change
_pull([], frags, p2 - p1) # what got deleted
new.append((l, pos + 12)) # what got added
pos += l + 12
last = p2
frags.extend(reversed(new)) # what was left at the end
t = _collect(m, b2, frags)
m.seek(t[1])
return m.read(t[0])
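
# Usage sketch: a binary delta is a sequence of hunks, each a 12-byte
# big-endian ">lll" header of (start, end, newlength) followed by newlength
# bytes of replacement data. For example, replacing bytes [0, 5) of an
# 11-byte text:
#
#   delta = struct.pack(">lll", 0, 5, 3) + b"HEY"
#   patches(b"hello world", [delta])    # -> b"HEY world"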

def patchedsize(orig, delta):
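    """return the size of the text produced by applying delta to a text of
    length orig, without building the patched text itself
    """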
outlen, last, bin = 0, 0, 0
binend = len(delta)
data = 12
while data <= binend:
decode = delta[bin:bin + 12]
start, end, length = struct.unpack(">lll", decode)
if start > end:
break
bin = data + length
data = bin + 12
outlen += start - last
last = end
outlen += length
if bin != binend:
raise mpatchError("patch cannot be decoded")
outlen += orig - last
return outlen
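# For the delta sketched above, patchedsize(11, delta) would report 9 (the
# length of b"HEY world") without constructing the result.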

if modulepolicy not in policynocffi:
try:
from _mpatch_cffi import ffi, lib
except ImportError:
if modulepolicy == 'cffi': # strict cffi import
raise
else:
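        # cffi backend: mpatch_fold() in C walks the patches and calls back
        # into cffi_get_next_item() below, which decodes bins[pos] into a
        # C-level fragment list with mpatch_decode()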
@ffi.def_extern()
def cffi_get_next_item(arg, pos):
all, bins = ffi.from_handle(arg)
container = ffi.new("struct mpatch_flist*[1]")
to_pass = ffi.new("char[]", str(bins[pos]))
all.append(to_pass)
r = lib.mpatch_decode(to_pass, len(to_pass) - 1, container)
if r < 0:
return ffi.NULL
return container[0]
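
        # cffi-backed replacement for the pure-Python patches() above, used
        # when the cffi module could be imported under the current policy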
def patches(text, bins):
lgt = len(bins)
all = []
if not lgt:
return text
arg = (all, bins)
patch = lib.mpatch_fold(ffi.new_handle(arg),
lib.cffi_get_next_item, 0, lgt)
if not patch:
raise mpatchError("cannot decode chunk")
outlen = lib.mpatch_calcsize(len(text), patch)
if outlen < 0:
lib.mpatch_lfree(patch)
raise mpatchError("inconsistency detected")
buf = ffi.new("char[]", outlen)
if lib.mpatch_apply(buf, text, len(text), patch) < 0:
lib.mpatch_lfree(patch)
raise mpatchError("error applying patches")
res = ffi.buffer(buf, outlen)[:]
lib.mpatch_lfree(patch)
return res