diff --git a/mercurial/fileset.py b/mercurial/fileset.py
--- a/mercurial/fileset.py
+++ b/mercurial/fileset.py
@@ -10,21 +10,21 @@ import parser, error, util, merge
 from i18n import _
 
 elements = {
-    # token-type: binding-strength, prefix, infix, suffix
-    "(": (20, ("group", 1, ")"), ("func", 1, ")"), None),
-    "-": (5, ("negate", 19), ("minus", 5), None),
-    "not": (10, ("not", 10), None, None),
-    "!": (10, ("not", 10), None, None),
-    "and": (5, None, ("and", 5), None),
-    "&": (5, None, ("and", 5), None),
-    "or": (4, None, ("or", 4), None),
-    "|": (4, None, ("or", 4), None),
-    "+": (4, None, ("or", 4), None),
-    ",": (2, None, ("list", 2), None),
-    ")": (0, None, None, None),
-    "symbol": (0, ("symbol",), None, None),
-    "string": (0, ("string",), None, None),
-    "end": (0, None, None, None),
+    # token-type: binding-strength, primary, prefix, infix, suffix
+    "(": (20, None, ("group", 1, ")"), ("func", 1, ")"), None),
+    "-": (5, None, ("negate", 19), ("minus", 5), None),
+    "not": (10, None, ("not", 10), None, None),
+    "!": (10, None, ("not", 10), None, None),
+    "and": (5, None, None, ("and", 5), None),
+    "&": (5, None, None, ("and", 5), None),
+    "or": (4, None, None, ("or", 4), None),
+    "|": (4, None, None, ("or", 4), None),
+    "+": (4, None, None, ("or", 4), None),
+    ",": (2, None, None, ("list", 2), None),
+    ")": (0, None, None, None, None),
+    "symbol": (0, "symbol", None, None, None),
+    "string": (0, "string", None, None, None),
+    "end": (0, None, None, None, None),
 }
 
 keywords = set(['and', 'or', 'not'])
diff --git a/mercurial/parser.py b/mercurial/parser.py
--- a/mercurial/parser.py
+++ b/mercurial/parser.py
@@ -11,8 +11,8 @@
 
 # takes a tokenizer and elements
 # tokenizer is an iterator that returns (type, value, pos) tuples
-# elements is a mapping of types to binding strength, prefix, infix and
-# suffix actions
+# elements is a mapping of types to binding strength, primary, prefix, infix
+# and suffix actions
 # an action is a tree node name, a tree label, and an optional match
 # __call__(program) parses program into a labeled tree
 
@@ -31,7 +31,7 @@ class parser(object):
         return t
     def _hasnewterm(self):
         'True if next token may start new term'
-        return bool(self._elements[self.current[0]][1])
+        return any(self._elements[self.current[0]][1:3])
     def _match(self, m):
         'make sure the tokenizer matches an end condition'
         if self.current[0] != m:
@@ -50,17 +50,17 @@ class parser(object):
     def _parse(self, bind=0):
         token, value, pos = self._advance()
         # handle prefix rules on current token
-        prefix = self._elements[token][1]
-        if not prefix:
+        primary, prefix = self._elements[token][1:3]
+        if primary:
+            expr = (primary, value)
+        elif prefix:
+            expr = (prefix[0], self._parseoperand(*prefix[1:]))
+        else:
             raise error.ParseError(_("not a prefix: %s") % token, pos)
-        if len(prefix) == 1:
-            expr = (prefix[0], value)
-        else:
-            expr = (prefix[0], self._parseoperand(*prefix[1:]))
         # gather tokens until we meet a lower binding strength
         while bind < self._elements[self.current[0]][0]:
             token, value, pos = self._advance()
-            infix, suffix = self._elements[token][2:]
+            infix, suffix = self._elements[token][3:]
             # check for suffix - next token isn't a valid prefix
             if suffix and not self._hasnewterm():
                 expr = (suffix[0], expr)
diff --git a/mercurial/revset.py b/mercurial/revset.py
--- a/mercurial/revset.py
+++ b/mercurial/revset.py
@@ -115,31 +115,31 @@ def _revsbetween(repo, roots, heads):
     return baseset(sorted(reachable))
 
 elements = {
-    # token-type: binding-strength, prefix, infix, suffix
-    "(": (21, ("group", 1, ")"), ("func", 1, ")"), None),
-    "##": (20, None, ("_concat", 20), None),
-    "~": (18, None, ("ancestor", 18), None),
-    "^": (18, None, ("parent", 18), ("parentpost", 18)),
-    "-": (5, ("negate", 19), ("minus", 5), None),
-    "::": (17, ("dagrangepre", 17), ("dagrange", 17),
+    # token-type: binding-strength, primary, prefix, infix, suffix
+    "(": (21, None, ("group", 1, ")"), ("func", 1, ")"), None),
+    "##": (20, None, None, ("_concat", 20), None),
+    "~": (18, None, None, ("ancestor", 18), None),
+    "^": (18, None, None, ("parent", 18), ("parentpost", 18)),
+    "-": (5, None, ("negate", 19), ("minus", 5), None),
+    "::": (17, None, ("dagrangepre", 17), ("dagrange", 17),
            ("dagrangepost", 17)),
-    "..": (17, ("dagrangepre", 17), ("dagrange", 17),
+    "..": (17, None, ("dagrangepre", 17), ("dagrange", 17),
           ("dagrangepost", 17)),
-    ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
-    "not": (10, ("not", 10), None, None),
-    "!": (10, ("not", 10), None, None),
-    "and": (5, None, ("and", 5), None),
-    "&": (5, None, ("and", 5), None),
-    "%": (5, None, ("only", 5), ("onlypost", 5)),
-    "or": (4, None, ("or", 4), None),
-    "|": (4, None, ("or", 4), None),
-    "+": (4, None, ("or", 4), None),
-    "=": (3, None, ("keyvalue", 3), None),
-    ",": (2, None, ("list", 2), None),
-    ")": (0, None, None, None),
-    "symbol": (0, ("symbol",), None, None),
-    "string": (0, ("string",), None, None),
-    "end": (0, None, None, None),
+    ":": (15, None, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
+    "not": (10, None, ("not", 10), None, None),
+    "!": (10, None, ("not", 10), None, None),
+    "and": (5, None, None, ("and", 5), None),
+    "&": (5, None, None, ("and", 5), None),
+    "%": (5, None, None, ("only", 5), ("onlypost", 5)),
+    "or": (4, None, None, ("or", 4), None),
+    "|": (4, None, None, ("or", 4), None),
+    "+": (4, None, None, ("or", 4), None),
+    "=": (3, None, None, ("keyvalue", 3), None),
+    ",": (2, None, None, ("list", 2), None),
+    ")": (0, None, None, None, None),
+    "symbol": (0, "symbol", None, None, None),
+    "string": (0, "string", None, None, None),
+    "end": (0, None, None, None, None),
 }
 
 keywords = set(['and', 'or', 'not'])
diff --git a/mercurial/templater.py b/mercurial/templater.py
--- a/mercurial/templater.py
+++ b/mercurial/templater.py
@@ -15,17 +15,17 @@ import minirst
 # template parsing
 
 elements = {
-    # token-type: binding-strength, prefix, infix, suffix
-    "(": (20, ("group", 1, ")"), ("func", 1, ")"), None),
-    ",": (2, None, ("list", 2), None),
-    "|": (5, None, ("|", 5), None),
-    "%": (6, None, ("%", 6), None),
-    ")": (0, None, None, None),
-    "integer": (0, ("integer",), None, None),
-    "symbol": (0, ("symbol",), None, None),
-    "string": (0, ("string",), None, None),
-    "template": (0, ("template",), None, None),
-    "end": (0, None, None, None),
+    # token-type: binding-strength, primary, prefix, infix, suffix
+    "(": (20, None, ("group", 1, ")"), ("func", 1, ")"), None),
+    ",": (2, None, None, ("list", 2), None),
+    "|": (5, None, None, ("|", 5), None),
+    "%": (6, None, None, ("%", 6), None),
+    ")": (0, None, None, None, None),
+    "integer": (0, "integer", None, None, None),
+    "symbol": (0, "symbol", None, None, None),
+    "string": (0, "string", None, None, None),
+    "template": (0, "template", None, None, None),
+    "end": (0, None, None, None, None),
 }
 
 def tokenize(program, start, end):