# HG changeset patch
# User Matt Mackall
# Date 2010-06-02 19:07:46
# Node ID 7df88cdf47fde0258c9bc389162af59d76033e6b
# Parent 2698a95f3f1ba3f62b151ef7f9f91a1ac30e212d
revset: add support for prefix and suffix versions of : and ::

diff --git a/mercurial/parser.py b/mercurial/parser.py
--- a/mercurial/parser.py
+++ b/mercurial/parser.py
@@ -23,7 +23,10 @@ class parser(object):
     def _advance(self):
         'advance the tokenizer'
         t = self.current
-        self.current = self._iter.next()
+        try:
+            self.current = self._iter.next()
+        except StopIteration:
+            pass
         return t
     def _match(self, m):
         'make sure the tokenizer matches an end condition'
@@ -49,17 +52,23 @@ class parser(object):
         # gather tokens until we meet a lower binding strength
         while bind < self._elements[self.current[0]][0]:
             token, value = self._advance()
-            # handle infix rules
-            infix = self._elements[token][2]
-            if len(infix) == 3 and infix[2] == self.current[0]:
-                self._match(infix[2])
-                expr = (infix[0], expr, (None))
+            e = self._elements[token]
+            # check for suffix - next token isn't a valid prefix
+            if len(e) == 4 and not self._elements[self.current[0]][1]:
+                suffix = e[3]
+                expr = (suffix[0], expr)
             else:
-                if not infix[0]:
-                    raise SyntaxError("not an infix")
-                expr = (infix[0], expr, self._parse(infix[1]))
-                if len(infix) == 3:
+                # handle infix rules
+                infix = self._elements[token][2]
+                if len(infix) == 3 and infix[2] == self.current[0]:
                     self._match(infix[2])
+                    expr = (infix[0], expr, (None))
+                else:
+                    if not infix[0]:
+                        raise SyntaxError("not an infix")
+                    expr = (infix[0], expr, self._parse(infix[1]))
+                    if len(infix) == 3:
+                        self._match(infix[2])
         return expr
     def parse(self, message):
         'generate a parse tree from a message'
diff --git a/mercurial/revset.py b/mercurial/revset.py
--- a/mercurial/revset.py
+++ b/mercurial/revset.py
@@ -12,8 +12,11 @@ import match as _match
 elements = {
     "(": (20, ("group", 1, ")"), ("func", 1, ")")),
     "-": (19, ("negate", 19), ("minus", 19)),
-    "..": (17, None, ("dagrange", 17)),
-    ":": (15, None, ("range", 15)),
+    "::": (17, ("dagrangepre", 17), ("dagrange", 17),
+           ("dagrangepost", 17)),
+    "..": (17, ("dagrangepre", 17), ("dagrange", 17),
+           ("dagrangepost", 17)),
+    ":": (15, ("rangepre", 15), ("range", 15), ("rangepost", 15)),
     "not": (10, ("not", 10)),
     "!": (10, ("not", 10)),
     "and": (5, None, ("and", 5)),
@@ -36,11 +39,14 @@ def tokenize(program):
         c = program[pos]
         if c.isspace(): # skip inter-token whitespace
             pass
-        elif c in "():,-|&+!": # handle simple operators
-            yield (c, None)
+        elif c == ':' and program[pos:pos + 2] == '::': # look ahead carefully
+            yield ('::', None)
+            pos += 1 # skip ahead
         elif c == '.' and program[pos:pos + 2] == '..': # look ahead carefully
            yield ('..', None)
            pos += 1 # skip ahead
+        elif c in "():,-|&+!": # handle simple operators
+            yield (c, None)
         elif c in '"\'': # handle quoted strings
             pos += 1
             s = pos
@@ -126,6 +132,12 @@ def rangeset(repo, subset, x, y):
         return range(m, n + 1)
     return range(m, n - 1, -1)
 
+def rangepreset(repo, subset, x):
+    return range(0, getset(repo, subset, x)[-1] + 1)
+
+def rangepostset(repo, subset, x):
+    return range(getset(repo, subset, x)[0], len(repo))
+
 def dagrangeset(repo, subset, x, y):
     return andset(repo, subset,
                   ('func', ('symbol', 'descendants'), x),
@@ -469,7 +481,11 @@ methods = {
     "negate": negate,
     "minus": minusset,
     "range": rangeset,
+    "rangepre": rangepreset,
+    "rangepost": rangepostset,
     "dagrange": dagrangeset,
+    "dagrangepre": ancestors,
+    "dagrangepost": descendants,
     "string": stringset,
     "symbol": symbolset,
     "and": andset,
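
For illustration only (not part of the changeset): the sketch below reimplements, in a
standalone toy parser, the suffix-detection rule the patch adds to parser.py. An operator
entry may carry a fourth, suffix rule, and it is chosen when the token following the
operator has no prefix rule and therefore cannot begin an operand; otherwise the operator
is treated as infix. The names ELEMENTS, toy_parse and kind are invented for this demo
and are not Mercurial APIs.

# Minimal sketch of the prefix/infix/suffix resolution used in the patch above.
# Each operator entry is (binding strength, prefix rule, infix rule[, suffix rule]).

ELEMENTS = {
    "::":  (17, ("dagrangepre", 17), ("dagrange", 17), ("dagrangepost", 17)),
    "sym": (0, ("symbol",), None),   # a symbol can only start an expression
    "end": (0, None, None),          # sentinel: cannot start an expression
}

def toy_parse(tokens):
    """Parse a flat token list like ['::', ('sym', 'x')] into a tuple tree."""
    it = iter(tokens + ["end"])
    cur = [next(it)]                 # one-token lookahead cell

    def advance():
        t, cur[0] = cur[0], next(it, "end")
        return t

    def kind(tok):
        return tok[0] if isinstance(tok, tuple) else tok

    def parse(bind=0):
        tok = advance()
        prefix = ELEMENTS[kind(tok)][1]
        if prefix is None:
            raise SyntaxError("not a prefix: %s" % kind(tok))
        # symbols are leaves; prefix operators recurse for their operand
        expr = (prefix[0], tok[1]) if kind(tok) == "sym" else (prefix[0], parse(prefix[1]))
        # gather operators until we meet a lower binding strength
        while bind < ELEMENTS[kind(cur[0])][0]:
            op = kind(advance())
            e = ELEMENTS[op]
            # suffix form: the token after the operator cannot begin an operand
            if len(e) == 4 and not ELEMENTS[kind(cur[0])][1]:
                expr = (e[3][0], expr)
            else:
                expr = (e[2][0], expr, parse(e[2][1]))
        return expr

    return parse()

print(toy_parse(["::", ("sym", "x")]))                # ('dagrangepre', ('symbol', 'x'))
print(toy_parse([("sym", "x"), "::"]))                # ('dagrangepost', ('symbol', 'x'))
print(toy_parse([("sym", "a"), "::", ("sym", "b")]))  # ('dagrange', ('symbol', 'a'), ('symbol', 'b'))

Under the patch's element table this is what makes '::x' parse as a dagrangepre
(ancestors), 'x::' as a dagrangepost (descendants), and 'x::y' as an ordinary infix
dagrange; ':' behaves the same way via rangepre, range, and rangepost.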