r16578 | """Token-related utilities""" | ||
# Copyright (c) IPython Development Team. | ||||
# Distributed under the terms of the Modified BSD License. | ||||
from __future__ import absolute_import, print_function | ||||
from collections import namedtuple | ||||
from io import StringIO | ||||
from keyword import iskeyword | ||||
from . import tokenize2 | ||||
from .py3compat import cast_unicode_py2 | ||||
Token = namedtuple('Token', ['token', 'text', 'start', 'end', 'line']) | ||||
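
# Illustration (not part of the original file): Token mirrors the 5-tuples
# produced by the tokenize module, e.g. the first token of u"x = 1\n" is
#   Token(token=NAME, text=u'x', start=(1, 0), end=(1, 1), line=u'x = 1\n')
# where `token` is the numeric token type and start/end are (row, col) pairs.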


def generate_tokens(readline):
    """wrap generate_tokens to catch EOF errors"""
    try:
        for token in tokenize2.generate_tokens(readline):
            yield token
    except tokenize2.TokenError:
        # catch EOF error
        return
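
# Usage sketch (illustrative): incomplete input such as an unclosed bracket
# makes the underlying tokenizer raise TokenError at EOF; this wrapper simply
# stops yielding instead, so partial cells can still be tokenized:
#
#     list(generate_tokens(StringIO(u"(1 +").readline))
#     # -> the tokens seen so far, with no exception raised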


def line_at_cursor(cell, cursor_pos=0):
    """Return the line in a cell at a given cursor position

    Used for calling line-based APIs that don't yet support multi-line input.

    Parameters
    ----------
    cell : str
        multiline block of text
    cursor_pos : int
        the cursor position

    Returns
    -------
    (line, offset): (text, integer)
        The line containing the cursor, and the character offset of the
        start of that line.
    """
    offset = 0
    lines = cell.splitlines(True)
    for line in lines:
        next_offset = offset + len(line)
        if next_offset >= cursor_pos:
            break
        offset = next_offset
    else:
        # no line contains the cursor (empty cell, or cursor past the end)
        line = ""
    return (line, offset)
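
# Usage sketch (illustrative): with the cursor at position 5, inside the
# second line of the cell, the full line and its starting offset come back:
#
#     line, offset = line_at_cursor(u"abc\ndef\n", cursor_pos=5)
#     # line == u"def\n", offset == 4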


def token_at_cursor(cell, cursor_pos=0):
    """Get the token at a given cursor

    Used for introspection.

    Parameters
    ----------
    cell : unicode
        A block of Python code
    cursor_pos : int
        The location of the cursor in the block where the token should be found
    """
    cell = cast_unicode_py2(cell)
    names = []
    tokens = []
    offset = 0
    for tup in generate_tokens(StringIO(cell).readline):
        tok = Token(*tup)
        # token, text, start, end, line = tup
        start_col = tok.start[1]
        end_col = tok.end[1]

        # allow '|foo' to find 'foo' at the beginning of a line
        boundary = cursor_pos + 1 if start_col == 0 else cursor_pos
        if offset + start_col >= boundary:
            # current token starts after the cursor,
            # don't consume it
            break
        if tok.token == tokenize2.NAME and not iskeyword(tok.text):
            if names and tokens and tokens[-1].token == tokenize2.OP and tokens[-1].text == '.':
                # dotted access: extend the previous name ('a' -> 'a.b')
                names[-1] = "%s.%s" % (names[-1], tok.text)
            else:
                names.append(tok.text)
        elif tok.token == tokenize2.OP:
            if tok.text == '=' and names:
                # don't inspect the lhs of an assignment
                names.pop(-1)

        if offset + end_col > cursor_pos:
            # we found the cursor, stop reading
            break

        tokens.append(tok)
        if tok.token == tokenize2.NEWLINE:
            # a logical line just ended; column offsets restart on the next
            # line, so advance the running character offset past it
            offset += len(tok.line)

    if names:
        return names[-1]
    else:
        return ''
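
# Usage sketch (illustrative): with the cursor just after 'b', dotted names
# are collapsed so introspection sees the full attribute path:
#
#     token_at_cursor(u"a.b(c)", cursor_pos=3)
#     # -> u'a.b'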