# parser.py - simple top-down operator precedence parser for mercurial
#
# Copyright 2010 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

# see http://effbot.org/zone/simple-top-down-parsing.htm and
# http://eli.thegreenplace.net/2010/01/02/top-down-operator-precedence-parsing/
# for background

# takes a tokenizer and elements
# tokenizer is an iterator that returns (type, value, pos) tuples
# elements is a mapping of types to binding strength, primary, prefix, infix
# and suffix actions
# an action is a tree node name, a tree label, and an optional match
# __call__(program) parses program into a labeled tree
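
# as an illustration only (mercurial's real grammars are defined by the
# callers, e.g. revset and templater), an elements table for a tiny language
# could look like this:
#
#   elements = {
#       # token: (binding strength, primary, prefix, infix, suffix)
#       "(": (20, None, ("group", 1, ")"), None, None),
#       ")": (0, None, None, None, None),
#       "not": (10, None, ("not", 10), None, None),
#       "and": (5, None, None, ("and", 5), None),
#       "or": (4, None, None, ("or", 4), None),
#       "symbol": (0, "symbol", None, None, None),
#       "end": (0, None, None, None, None),
#   }
#
# a primary action is just a tree node name paired with the token value, a
# prefix or infix action is a node name plus the binding strength used for
# its right-hand side (and, optionally, a closing token to match), and a
# suffix action is a node name wrapped around the expression to its left
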
from __future__ import absolute_import

from .i18n import _
from . import error

class parser(object):
def __init__(self, elements, methods=None):
self._elements = elements
self._methods = methods
self.current = None
def _advance(self):
'advance the tokenizer'
t = self.current
self.current = next(self._iter, None)
return t
def _hasnewterm(self):
'True if next token may start new term'
return any(self._elements[self.current[0]][1:3])
def _match(self, m):
'make sure the tokenizer matches an end condition'
if self.current[0] != m:
raise error.ParseError(_("unexpected token: %s") % self.current[0],
self.current[2])
self._advance()
def _parseoperand(self, bind, m=None):
'gather right-hand-side operand until an end condition or binding met'
if m and self.current[0] == m:
expr = None
else:
expr = self._parse(bind)
if m:
self._match(m)
return expr
def _parse(self, bind=0):
token, value, pos = self._advance()
# handle prefix rules on current token, take as primary if unambiguous
primary, prefix = self._elements[token][1:3]
if primary and not (prefix and self._hasnewterm()):
expr = (primary, value)
elif prefix:
expr = (prefix[0], self._parseoperand(*prefix[1:]))
else:
raise error.ParseError(_("not a prefix: %s") % token, pos)
# gather tokens until we meet a lower binding strength
while bind < self._elements[self.current[0]][0]:
token, value, pos = self._advance()
# handle infix rules, take as suffix if unambiguous
infix, suffix = self._elements[token][3:]
if suffix and not (infix and self._hasnewterm()):
expr = (suffix[0], expr)
elif infix:
expr = (infix[0], expr, self._parseoperand(*infix[1:]))
else:
raise error.ParseError(_("not an infix: %s") % token, pos)
return expr
def parse(self, tokeniter):
'generate a parse tree from tokens'
self._iter = tokeniter
self._advance()
res = self._parse()
token, value, pos = self.current
return res, pos
def eval(self, tree):
'recursively evaluate a parse tree using node methods'
if not isinstance(tree, tuple):
return tree
return self._methods[tree[0]](*[self.eval(t) for t in tree[1:]])
def __call__(self, tokeniter):
'parse tokens into a parse tree and evaluate if methods given'
t = self.parse(tokeniter)
if self._methods:
return self.eval(t)
return t
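
# a minimal sketch of how a caller could drive the parser class, reusing the
# illustrative elements table from the header comment (the tokenizer and the
# input below are invented for this example):
#
#   import re
#
#   def tokenize(program):
#       for m in re.finditer(r'\S+', program):
#           word = m.group()
#           if word in ('not', 'and', 'or'):
#               yield (word, None, m.start())
#           else:
#               yield ('symbol', word, m.start())
#       yield ('end', None, len(program))
#
#   p = parser(elements)
#   p.parse(tokenize('not a and b'))
#   # -> (('and', ('not', ('symbol', 'a')), ('symbol', 'b')), 11)
#
# given a methods mapping from node names ('and', 'not', 'symbol', ...) to
# callables, eval() reduces such a tree bottom-up to a value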

def buildargsdict(trees, funcname, keys, keyvaluenode, keynode):
"""Build dict from list containing positional and keyword arguments
Invalid keywords or too many positional arguments are rejected, but
missing arguments are just omitted.
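
    For illustration, with 'keyvalue' and 'symbol' as example node names
    supplied by the caller:

    >>> trees = [('symbol', '10'),
    ...          ('keyvalue', ('symbol', 'branch'), ('string', 'default'))]
    >>> args = buildargsdict(trees, 'limit', ('limit', 'branch'),
    ...                      'keyvalue', 'symbol')
    >>> sorted(args.items())
    [('branch', ('string', 'default')), ('limit', ('symbol', '10'))]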
"""
if len(trees) > len(keys):
raise error.ParseError(_("%(func)s takes at most %(nargs)d arguments")
% {'func': funcname, 'nargs': len(keys)})
args = {}
# consume positional arguments
for k, x in zip(keys, trees):
if x[0] == keyvaluenode:
break
args[k] = x
# remainder should be keyword arguments
for x in trees[len(args):]:
if x[0] != keyvaluenode or x[1][0] != keynode:
raise error.ParseError(_("%(func)s got an invalid argument")
% {'func': funcname})
k = x[1][1]
if k not in keys:
raise error.ParseError(_("%(func)s got an unexpected keyword "
"argument '%(key)s'")
% {'func': funcname, 'key': k})
if k in args:
raise error.ParseError(_("%(func)s got multiple values for keyword "
"argument '%(key)s'")
% {'func': funcname, 'key': k})
args[k] = x[2]
return args

def unescapestr(s):
try:
return s.decode("string_escape")
except ValueError as e:
# mangle Python's exception into our format
raise error.ParseError(str(e).lower())

def _prettyformat(tree, leafnodes, level, lines):
if not isinstance(tree, tuple) or tree[0] in leafnodes:
lines.append((level, str(tree)))
else:
lines.append((level, '(%s' % tree[0]))
for s in tree[1:]:
_prettyformat(s, leafnodes, level + 1, lines)
lines[-1:] = [(lines[-1][0], lines[-1][1] + ')')]

def prettyformat(tree, leafnodes):
lines = []
_prettyformat(tree, leafnodes, 0, lines)
output = '\n'.join((' ' * l + s) for l, s in lines)
return output

def simplifyinfixops(tree, targetnodes):
"""Flatten chained infix operations to reduce usage of Python stack
>>> def f(tree):
... print prettyformat(simplifyinfixops(tree, ('or',)), ('symbol',))
>>> f(('or',
... ('or',
... ('symbol', '1'),
... ('symbol', '2')),
... ('symbol', '3')))
(or
('symbol', '1')
('symbol', '2')
('symbol', '3'))
>>> f(('func',
... ('symbol', 'p1'),
... ('or',
... ('or',
... ('func',
... ('symbol', 'sort'),
... ('list',
... ('or',
... ('or',
... ('symbol', '1'),
... ('symbol', '2')),
... ('symbol', '3')),
... ('negate',
... ('symbol', 'rev')))),
... ('and',
... ('symbol', '4'),
... ('group',
... ('or',
... ('or',
... ('symbol', '5'),
... ('symbol', '6')),
... ('symbol', '7'))))),
... ('symbol', '8'))))
(func
('symbol', 'p1')
(or
(func
('symbol', 'sort')
(list
(or
('symbol', '1')
('symbol', '2')
('symbol', '3'))
(negate
('symbol', 'rev'))))
(and
('symbol', '4')
(group
(or
('symbol', '5')
('symbol', '6')
('symbol', '7'))))
('symbol', '8')))
"""
if not isinstance(tree, tuple):
return tree
op = tree[0]
if op not in targetnodes:
return (op,) + tuple(simplifyinfixops(x, targetnodes) for x in tree[1:])
# walk down left nodes taking each right node. no recursion to left nodes
# because infix operators are left-associative, i.e. left tree is deep.
# e.g. '1 + 2 + 3' -> (+ (+ 1 2) 3) -> (+ 1 2 3)
simplified = []
x = tree
while x[0] == op:
l, r = x[1:]
simplified.append(simplifyinfixops(r, targetnodes))
x = l
simplified.append(simplifyinfixops(x, targetnodes))
simplified.append(op)
return tuple(reversed(simplified))