# completion_lexer.py
# Lexes code snippets with Pygments to extract symbol completion contexts.

# System library imports
from pygments.token import Token, is_token_subtype
class CompletionLexer(object):
    """ Uses Pygments and some auxiliary information to lex code snippets for
        symbol contexts.
    """

    # Maps Pygments lexer names to a list of possible name separators.
    separator_map = { 'C' : [ '.', '->' ],
                      'C++' : [ '.', '->', '::' ],
                      'Python' : [ '.' ] }

    def __init__(self, lexer):
        """ Create a CompletionLexer using the specified Pygments lexer.
        """
        self.lexer = lexer

    def get_context(self, string):
        """ Assuming the cursor is at the end of the specified string, get the
            context (a list of names) for the symbol at cursor position.
        """
        context = []
        # Lex the entire snippet, then walk the tokens backwards from the
        # cursor position (the end of the string).
        reversed_tokens = list(self._lexer.get_tokens(string))
        reversed_tokens.reverse()

        # Pygments often tacks on a newline when none is specified in the
        # input. Remove this newline.
        if reversed_tokens and reversed_tokens[0][1].endswith('\n') and \
                not string.endswith('\n'):
            reversed_tokens.pop(0)

        current_op = ''
        for token, text in reversed_tokens:

            if is_token_subtype(token, Token.Name):
                # Handle a trailing separator, e.g. 'foo.bar.'
                if current_op in self._name_separators:
                    if not context:
                        context.insert(0, '')

                # Handle non-separator operators and punctuation.
                elif current_op:
                    break

                context.insert(0, text)
                current_op = ''

            # Pygments doesn't understand that, e.g., '->' is a single
            # operator in C++. This is why we have to build up an operator
            # from potentially several tokens.
            elif token is Token.Operator or token is Token.Punctuation:
                current_op = text + current_op

            # Break on anything that is not an Operator, Punctuation, or Name.
            else:
                break

        return context

    def get_lexer(self, lexer=None):
        """ Return the Pygments lexer in use.

        BUGFIX: this is installed as the getter of the 'lexer' property below,
        and property getters are called with only 'self'. The previous
        signature required a (never used) 'lexer' argument, so every read of
        'obj.lexer' raised TypeError. The argument is kept as an ignored,
        optional parameter for backward compatibility with direct callers.
        """
        return self._lexer

    def set_lexer(self, lexer, name_separators=None):
        """ Set the Pygments lexer. When 'name_separators' is omitted, the
            separators are inferred from the lexer's name via 'separator_map',
            defaulting to ['.'] for unknown languages.
        """
        self._lexer = lexer
        if name_separators is None:
            self._name_separators = self.separator_map.get(lexer.name, ['.'])
        else:
            self._name_separators = list(name_separators)

    lexer = property(get_lexer, set_lexer)