url: move URL parsing functions into util to improve startup time

The introduction of the new URL parsing code has created a startup time regression. This is mainly due to the use of url.hasscheme() in the ui class: it ends up importing the many libraries that the url module requires. This fix helps marginally, but if we can get rid of the urllib import in the URL parser altogether, startup time will go back to normal.

perf startup time before the URL refactoring (8796fb6af67e):
! wall 0.050692 comb 0.000000 user 0.000000 sys 0.000000 (best of 100)
current startup time (139fb11210bb):
! wall 0.070685 comb 0.000000 user 0.000000 sys 0.000000 (best of 100)
after this change:
! wall 0.064667 comb 0.000000 user 0.000000 sys 0.000000 (best of 100)
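A common way to address this kind of regression is to keep expensive imports out of module scope and defer them into the function that needs them, so the cost is only paid when that code path actually runs. A minimal sketch of the idea (illustrative only, not the actual Mercurial patch; the function name and the use of urllib.splittype are assumptions):

def hasscheme(path):
    # Deferred import: urllib (and everything it pulls in) is loaded only
    # when a URL is actually inspected, not at startup.
    import urllib
    return bool(urllib.splittype(path)[0])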

# config.py - configuration parsing for Mercurial
#
# Copyright 2009 Matt Mackall <mpm@selenic.com> and others
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
import error, util
import re, os

class sortdict(dict):
    'a simple sorted dictionary'
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        for k in src:
            self[k] = src[k]
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)

class config(object):
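    'sections of config items, with the "file:line" origin of every item'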
    def __init__(self, data=None):
        self._data = {}
        self._source = {}
        if data:
            for k in data._data:
                self._data[k] = data[k].copy()
            self._source = data._source.copy()
    def copy(self):
        return config(self)
    def __contains__(self, section):
        return section in self._data
    def __getitem__(self, section):
        return self._data.get(section, {})
    def __iter__(self):
        for d in self.sections():
            yield d
    def update(self, src):
        for s in src:
            if s not in self:
                self._data[s] = sortdict()
            self._data[s].update(src._data[s])
        self._source.update(src._source)
    def get(self, section, item, default=None):
        return self._data.get(section, {}).get(item, default)
    def source(self, section, item):
        return self._source.get((section, item), "")
    def sections(self):
        return sorted(self._data.keys())
    def items(self, section):
        return self._data.get(section, {}).items()
    def set(self, section, item, value, source=""):
        if section not in self:
            self._data[section] = sortdict()
        self._data[section][item] = value
        self._source[(section, item)] = source
    def parse(self, src, data, sections=None, remap=None, include=None):
        sectionre = re.compile(r'\[([^\[]+)\]')
        itemre = re.compile(r'([^=\s][^=]*?)\s*=\s*(.*\S|)')
        contre = re.compile(r'\s+(\S|\S.*\S)\s*$')
        emptyre = re.compile(r'(;|#|\s*$)')
        unsetre = re.compile(r'%unset\s+(\S+)')
        includere = re.compile(r'%include\s+(\S|\S.*\S)\s*$')
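        # Line forms recognized by the patterns above:
        #   [section]          section header                  (sectionre)
        #   name = value       item; the value may be empty    (itemre)
        #   <space>text        continuation of previous value  (contre)
        #   ; or # or blank    comment/blank line, ignored     (emptyre)
        #   %unset name        remove a previously set item    (unsetre)
        #   %include path      parse another config file       (includere)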
        section = ""
        item = None
        line = 0
        cont = False

        for l in data.splitlines(True):
            line += 1
            if cont:
                m = contre.match(l)
                if m:
                    if sections and section not in sections:
                        continue
                    v = self.get(section, item) + "\n" + m.group(1)
                    self.set(section, item, v, "%s:%d" % (src, line))
                    continue
                item = None
                cont = False
            m = includere.match(l)
            if m:
                inc = util.expandpath(m.group(1))
                base = os.path.dirname(src)
                inc = os.path.normpath(os.path.join(base, inc))
                if include:
                    try:
                        include(inc, remap=remap, sections=sections)
                    except IOError, inst:
                        raise error.ParseError(_("cannot include %s (%s)")
                                               % (inc, inst.strerror),
                                               "%s:%s" % (src, line))
                continue
            if emptyre.match(l):
                continue
            m = sectionre.match(l)
            if m:
                section = m.group(1)
                if remap:
                    section = remap.get(section, section)
                if section not in self:
                    self._data[section] = sortdict()
                continue
            m = itemre.match(l)
            if m:
                item = m.group(1)
                cont = True
                if sections and section not in sections:
                    continue
                self.set(section, item, m.group(2), "%s:%d" % (src, line))
                continue
            m = unsetre.match(l)
            if m:
                name = m.group(1)
                if sections and section not in sections:
                    continue
                if self.get(section, name) is not None:
                    del self._data[section][name]
                continue

            raise error.ParseError(l.rstrip(), ("%s:%s" % (src, line)))
    def read(self, path, fp=None, sections=None, remap=None):
        if not fp:
            fp = util.posixfile(path)
        self.parse(path, fp.read(), sections, remap, self.read)
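
if __name__ == '__main__':
    # Illustrative only: exercise the config class above on a small
    # hgrc-style snippet. The section/item names are made up for the demo,
    # and running this file directly requires the other mercurial modules
    # (i18n, error, util) to be importable.
    cfg = config()
    cfg.parse('<sample>',
              '[ui]\n'
              'username = Jane Doe <jane@example.com>\n'
              '[extensions]\n'
              'mq =\n')
    print cfg.sections()                # ['extensions', 'ui']
    print cfg.get('ui', 'username')     # Jane Doe <jane@example.com>
    print cfg.source('ui', 'username')  # <sample>:2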