protocol: send application/mercurial-0.2 responses to capable clients

With this commit, the HTTP transport now parses the X-HgProto-<N> header to
determine what media type and compression engine to use for responses. So
far, we only compress responses that are already being compressed with zlib
today (stream response types to specific commands). We can expand things to
cover additional response types later.

The practical side-effect of this commit is that non-zlib compression
engines will be used if both ends support them. This means if both ends have
zstd support, zstd - not zlib - will be used to compress data!

When cloning the mozilla-unified repository between a local HTTP server and
client, the benefits of non-zlib compression are quite noticeable:

  engine       server CPU (s)  client CPU (s)  bundle size
  zlib (l=6)   174.1           283.2           1,148,547,026
  zstd (l=1)    99.2           267.3           1,127,513,841
  zstd (l=3)   103.1           266.9           1,018,861,363
  zstd (l=7)   128.3           269.7             919,190,278
  zstd (l=10)  162.0           -                 894,547,179
  none          95.3           277.2           4,097,566,064

The default zstd compression level is 3. So if you deploy zstd-capable
Mercurial to your clients and servers and CPU time on your server is
dominated by "getbundle" requests (clients cloning and pulling) - and my
experience at Mozilla tells me this is often the case - this commit could
drastically reduce your server-side CPU usage *and* save on bandwidth costs!

Another benefit of this change is that server operators can install *any*
compression engine. While it isn't enabled by default, the "none" compression
engine can now be used to disable wire protocol compression completely.
Previously, commands like "getbundle" always zlib-compressed output, adding
considerable overhead to generating responses. If you are on a high-speed
network and your server is under high load, it might be advantageous to
trade bandwidth for CPU. That said, zstd at level 1 doesn't use that much
CPU, so I'm not convinced that disabling compression wholesale is worthwhile.
And my data seems to indicate a slowdown on the client without compression.
I suspect this is due to a lack of buffering resulting in an increase in
socket read() calls and/or the fact that we're transferring an extra 3 GB of
data (parsing HTTP chunked transfer and processing extra TCP packets can add
up). This is definitely worth investigating and optimizing, but since the
"none" compressor isn't enabled by default, I'm inclined to punt on this
issue.

This commit introduces tons of tests. Some of these should arguably have
been implemented in previous commits, but it was difficult to test without
the server functionality in place.
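For illustration, here is a minimal sketch of the negotiation the message
describes: pick the first client-advertised compression engine the server
also supports, falling back to zlib. The header value shown and the
choosecompengine/SERVER_ENGINES names are simplified assumptions for this
example, not Mercurial's actual wire-protocol code (the real implementation
consults Mercurial's compression engine registry):

    # Hypothetical server-side engine list; Mercurial's real registry is
    # richer, and zstd support is optional at build time.
    SERVER_ENGINES = ['zstd', 'zlib', 'none']

    def choosecompengine(xhgproto):
        """Pick an engine from an X-HgProto-<N> header value.

        The value carries space-delimited tokens such as
        '0.1 0.2 comp=zstd,zlib,none'. We scan for the 'comp=' token and
        return the first client-advertised engine the server supports.
        """
        for token in xhgproto.split():
            if token.startswith('comp='):
                for engine in token[len('comp='):].split(','):
                    if engine in SERVER_ENGINES:
                        return engine
        # No overlap, or no comp= token at all: fall back to zlib, which
        # every existing client understands.
        return 'zlib'

    assert choosecompengine('0.1 0.2 comp=zstd,zlib,none') == 'zstd'
    assert choosecompengine('0.1') == 'zlib'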

File last commit:

r30349:95400242 default
r30764:e75463e3 default
config.py
173 lines | 6.1 KiB | text/x-python
# config.py - configuration parsing for Mercurial
#
# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from __future__ import absolute_import

import errno
import os

from .i18n import _
from . import (
    error,
    util,
)
class config(object):
    def __init__(self, data=None, includepaths=[]):
        self._data = {}
        self._source = {}
        self._unset = []
        self._includepaths = includepaths
        if data:
            for k in data._data:
                self._data[k] = data[k].copy()
            self._source = data._source.copy()

    def copy(self):
        return config(self)

    def __contains__(self, section):
        return section in self._data

    def hasitem(self, section, item):
        return item in self._data.get(section, {})

    def __getitem__(self, section):
        return self._data.get(section, {})

    def __iter__(self):
        for d in self.sections():
            yield d

    def update(self, src):
        for s, n in src._unset:
            if s in self and n in self._data[s]:
                del self._data[s][n]
                del self._source[(s, n)]
        for s in src:
            if s not in self:
                self._data[s] = util.sortdict()
            self._data[s].update(src._data[s])
        self._source.update(src._source)

    def get(self, section, item, default=None):
        return self._data.get(section, {}).get(item, default)
    def backup(self, section, item):
        """return a tuple allowing restore to reinstall a previous value

        The main reason we need it is because it handles the "no data" case.
        """
        try:
            value = self._data[section][item]
            source = self.source(section, item)
            return (section, item, value, source)
        except KeyError:
            return (section, item)

    def source(self, section, item):
        return self._source.get((section, item), "")

    def sections(self):
        return sorted(self._data.keys())

    def items(self, section):
        return self._data.get(section, {}).items()

    def set(self, section, item, value, source=""):
        if section not in self:
            self._data[section] = util.sortdict()
        self._data[section][item] = value
        if source:
            self._source[(section, item)] = source

    def restore(self, data):
        """restore data returned by self.backup"""
        if len(data) == 4:
            # restore old data
            section, item, value, source = data
            self._data[section][item] = value
            self._source[(section, item)] = source
        else:
            # no data before, remove everything
            section, item = data
            if section in self._data:
                self._data[section].pop(item, None)
            self._source.pop((section, item), None)
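
    # Example usage (hypothetical 'cfg' instance): backup/restore lets a
    # caller temporarily override a value and later reinstall whatever was
    # there before, including the "item was never set" case:
    #
    #   saved = cfg.backup('ui', 'editor')
    #   cfg.set('ui', 'editor', 'vim', 'session')
    #   cfg.restore(saved)  # ui.editor is back to its prior state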
    def parse(self, src, data, sections=None, remap=None, include=None):
        sectionre = util.re.compile(br'\[([^\[]+)\]')
        itemre = util.re.compile(br'([^=\s][^=]*?)\s*=\s*(.*\S|)')
        contre = util.re.compile(br'\s+(\S|\S.*\S)\s*$')
        emptyre = util.re.compile(br'(;|#|\s*$)')
        commentre = util.re.compile(br'(;|#)')
        unsetre = util.re.compile(br'%unset\s+(\S+)')
        includere = util.re.compile(br'%include\s+(\S|\S.*\S)\s*$')
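        # Each regex above matches one construct of the hgrc grammar, as in
        # this illustrative sample input:
        #
        #   %include other.rc          <- includere
        #   [section]                  <- sectionre
        #   item = first line          <- itemre
        #     continuation line        <- contre (leading whitespace)
        #   %unset item                <- unsetre
        #   # comment or blank line    <- emptyre / commentre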
        section = ""
        item = None
        line = 0
        cont = False

        for l in data.splitlines(True):
            line += 1
            if line == 1 and l.startswith('\xef\xbb\xbf'):
                # Someone set us up the BOM
                l = l[3:]
            if cont:
                if commentre.match(l):
                    continue
                m = contre.match(l)
                if m:
                    if sections and section not in sections:
                        continue
                    v = self.get(section, item) + "\n" + m.group(1)
                    self.set(section, item, v, "%s:%d" % (src, line))
                    continue
                item = None
                cont = False
            m = includere.match(l)
            if m and include:
                expanded = util.expandpath(m.group(1))
                includepaths = [os.path.dirname(src)] + self._includepaths
                for base in includepaths:
                    inc = os.path.normpath(os.path.join(base, expanded))
                    try:
                        include(inc, remap=remap, sections=sections)
                        break
                    except IOError as inst:
                        if inst.errno != errno.ENOENT:
                            raise error.ParseError(_("cannot include %s (%s)")
                                                   % (inc, inst.strerror),
                                                   "%s:%s" % (src, line))
                continue
            if emptyre.match(l):
                continue
            m = sectionre.match(l)
            if m:
                section = m.group(1)
                if remap:
                    section = remap.get(section, section)
                if section not in self:
                    self._data[section] = util.sortdict()
                continue
            m = itemre.match(l)
            if m:
                item = m.group(1)
                cont = True
                if sections and section not in sections:
                    continue
                self.set(section, item, m.group(2), "%s:%d" % (src, line))
                continue
            m = unsetre.match(l)
            if m:
                name = m.group(1)
                if sections and section not in sections:
                    continue
                if self.get(section, name) is not None:
                    del self._data[section][name]
                self._unset.append((section, name))
                continue

            raise error.ParseError(l.rstrip(), ("%s:%s" % (src, line)))
    def read(self, path, fp=None, sections=None, remap=None):
        if not fp:
            fp = util.posixfile(path)
        self.parse(path, fp.read(), sections, remap, self.read)
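
To make the parsing behavior concrete, here is a short, hypothetical usage
sketch. The sample text and the names 'sample', 'cfg', and 'example.rc' are
invented for illustration, and it assumes a Python 2-era Mercurial checkout
on the import path:

    from mercurial import config as configmod

    sample = (
        "[ui]\n"
        "username = Jane Doe <jane@example.com>\n"
        "  continued onto a second line\n"
        "%unset verbose\n"
    )

    cfg = configmod.config()
    cfg.parse('example.rc', sample)

    # Continuation lines are joined to the first line with '\n'.
    print(cfg.get('ui', 'username'))
    # Each value remembers where it was last set, as 'file:line'; here the
    # continuation on line 3 was the last write.
    print(cfg.source('ui', 'username'))  # -> example.rc:3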