Show More
@@ -0,0 +1,56 b'' | |||
|
1 | """Tests for tokenutil""" | |
|
2 | # Copyright (c) IPython Development Team. | |
|
3 | # Distributed under the terms of the Modified BSD License. | |
|
4 | ||
|
5 | import nose.tools as nt | |
|
6 | ||
|
7 | from IPython.utils.tokenutil import token_at_cursor | |
|
8 | ||
|
def expect_token(expected, cell, column, line=0):
    """Assert that the token found at a cursor position equals *expected*.

    Parameters
    ----------
    expected : str
        The token text token_at_cursor is expected to return.
    cell : str
        The block of code being inspected.
    column : int
        The cursor's column offset within the line.
    line : int, optional
        The cursor's line (0-based); defaults to the first line.
    """
    token = token_at_cursor(cell, column, line)

    lines = cell.splitlines()
    # Render the cursor position with a '|' marker for a readable failure message.
    line_with_cursor = '%s|%s' % (lines[line][:column], lines[line][column:])
    # Fixed: removed a stray no-op `line` expression statement, and corrected
    # the "Excpected" typo in the assertion message.
    nt.assert_equal(token, expected,
        "Expected %r, got %r in: %s" % (
        expected, token, line_with_cursor)
    )
|
19 | ||
|
def test_simple():
    """A cursor anywhere inside a bare name resolves to that name."""
    source = "foo"
    for offset, _char in enumerate(source):
        expect_token(source, source, offset)
|
24 | ||
|
def test_function():
    """A cursor inside a call's argument list resolves to the function name.

    Keyword-argument names on the left of '=' are deliberately skipped by
    the column offsets, since token_at_cursor ignores assignment LHS names.
    """
    cell = "foo(a=5, b='10')"
    expected = 'foo'
    # Fixed: the loop previously passed the literal "foo" and left
    # `expected` unused; use the variable as the sibling tests do.
    for i in (6, 7, 8, 10, 11, 12):
        expect_token(expected, cell, i)
|
30 | ||
|
def test_multiline():
    """Tokens are located on the correct (non-first) line of a multi-line cell."""
    cell = '\n'.join([
        'a = 5',
        'b = hello("string", there)'
    ])
    # (expected token, column range on line 1) pairs
    cases = (
        ('hello', range(4, 9)),
        ('there', range(21, 27)),
    )
    for expected, columns in cases:
        for col in columns:
            expect_token(expected, cell, col, 1)
|
42 | ||
|
def test_attrs():
    """Dotted attribute access accumulates into progressively longer tokens."""
    cell = "foo(a=obj.attr.subattr)"
    pos = cell.find('obj')
    # Each segment of the dotted path extends the expected token:
    # over 'obj' -> 'obj', over 'attr' -> 'obj.attr', over 'subattr' -> full path.
    cases = (
        ('obj', range(pos, pos + 3)),
        ('obj.attr', range(pos + 4, pos + 8)),
        ('obj.attr.subattr', range(pos + 9, len(cell))),
    )
    for expected, columns in cases:
        for col in columns:
            expect_token(expected, cell, col)
@@ -0,0 +1,80 b'' | |||
|
1 | """Token-related utilities""" | |
|
2 | ||
|
3 | # Copyright (c) IPython Development Team. | |
|
4 | # Distributed under the terms of the Modified BSD License. | |
|
5 | ||
|
6 | from __future__ import absolute_import, print_function | |
|
7 | ||
|
8 | from collections import namedtuple | |
|
9 | from io import StringIO | |
|
10 | from keyword import iskeyword | |
|
11 | ||
|
12 | from . import tokenize2 | |
|
13 | from .py3compat import cast_unicode_py2 | |
|
14 | ||
|
# Lightweight record mirroring the 5-tuples produced by tokenize.generate_tokens:
# (token type, token text, (start row, start col), (end row, end col), source line)
Token = namedtuple('Token', ['token', 'text', 'start', 'end', 'line'])
|
16 | ||
|
def generate_tokens(readline):
    """Yield tokens from *readline*, treating tokenize's EOF TokenError as end-of-stream."""
    token_stream = tokenize2.generate_tokens(readline)
    while True:
        try:
            yield next(token_stream)
        except StopIteration:
            return
        except tokenize2.TokenError:
            # incomplete input (e.g. an unclosed bracket) raises TokenError
            # at EOF; swallow it and simply stop producing tokens
            return
|
25 | ||
|
def token_at_cursor(cell, column, line=0):
    """Get the token at a given cursor

    Used for introspection.

    Parameters
    ----------

    cell : unicode
        A block of Python code
    column : int
        The column of the cursor offset, where the token should be found
    line : int, optional
        The line where the token should be found (optional if cell is a single line)

    Returns
    -------
    The (possibly dotted) name string under the cursor, or '' if no name
    token was found before the cursor.
    """
    cell = cast_unicode_py2(cell)
    names = []          # candidate names seen so far; most recent is last
    tokens = []         # every token consumed, so we can look back one token
    current_line = 0    # 0-based index of the line currently being scanned
    for tup in generate_tokens(StringIO(cell).readline):

        tok = Token(*tup)

        # token, text, start, end, line = tup
        start_col = tok.start[1]
        end_col = tok.end[1]
        if line == current_line and start_col > column:
            # current token starts after the cursor,
            # don't consume it
            break

        if tok.token == tokenize2.NAME and not iskeyword(tok.text):
            # if the immediately preceding token was a '.', extend the last
            # collected name into a dotted path (obj -> obj.attr -> ...)
            if names and tokens and tokens[-1].token == tokenize2.OP and tokens[-1].text == '.':
                names[-1] = "%s.%s" % (names[-1], tok.text)
            else:
                names.append(tok.text)
        elif tok.token == tokenize2.OP:
            if tok.text == '=' and names:
                # don't inspect the lhs of an assignment
                names.pop(-1)

        if line == current_line and end_col > column:
            # we found the cursor, stop reading
            break

        tokens.append(tok)
        if tok.token == tokenize2.NEWLINE:
            # NOTE(review): only NEWLINE (end of a logical line) advances the
            # counter; NL tokens from blank/continuation lines do not — so
            # `line` is presumably a logical-line index. Confirm against callers.
            current_line += 1

    if names:
        # the cursor fell inside (or just after) the most recent name token
        return names[-1]
    else:
        return ''
|
79 | ||
|
80 |
General Comments 0
You need to be logged in to leave comments.
Login now