@@ -1,63 +1,70 @@
 """Tests for tokenutil"""
 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.
 
 import nose.tools as nt
 
 from IPython.utils.tokenutil import token_at_cursor
 
 def expect_token(expected, cell, cursor_pos):
     token = token_at_cursor(cell, cursor_pos)
     offset = 0
     for line in cell.splitlines():
         if offset + len(line) >= cursor_pos:
             break
         else:
             offset += len(line)
     column = cursor_pos - offset
     line_with_cursor = '%s|%s' % (line[:column], line[column:])
     line
     nt.assert_equal(token, expected,
-        "Expected %r, got %r in: %r" % (
-        expected, token, line_with_cursor)
+        "Expected %r, got %r in: %r (pos %i)" % (
+        expected, token, line_with_cursor, cursor_pos)
     )
 
 def test_simple():
     cell = "foo"
     for i in range(len(cell)):
         expect_token("foo", cell, i)
 
 def test_function():
     cell = "foo(a=5, b='10')"
     expected = 'foo'
-    for i in (6,7,8,10,11,12):
+    # up to `foo(|a=`
+    for i in range(cell.find('a=') + 1):
+        expect_token("foo", cell, i)
+    # find foo after `=`
+    for i in [cell.find('=') + 1, cell.rfind('=') + 1]:
+        expect_token("foo", cell, i)
+    # in between `5,|` and `|b=`
+    for i in range(cell.find(','), cell.find('b=')):
         expect_token("foo", cell, i)
 
 def test_multiline():
     cell = '\n'.join([
         'a = 5',
         'b = hello("string", there)'
     ])
     expected = 'hello'
-    start = cell.index(expected)
+    start = cell.index(expected) + 1
     for i in range(start, start + len(expected)):
         expect_token(expected, cell, i)
     expected = 'there'
-    start = cell.index(expected)
+    start = cell.index(expected) + 1
     for i in range(start, start + len(expected)):
         expect_token(expected, cell, i)
 
 def test_attrs():
     cell = "foo(a=obj.attr.subattr)"
     expected = 'obj'
-    idx = cell.find('obj')
+    idx = cell.find('obj') + 1
     for i in range(idx, idx + 3):
         expect_token(expected, cell, i)
-    idx = idx + 4
+    idx = cell.find('.attr') + 2
     expected = 'obj.attr'
     for i in range(idx, idx + 4):
         expect_token(expected, cell, i)
-    idx = idx + 5
+    idx = cell.find('.subattr') + 2
     expected = 'obj.attr.subattr'
     for i in range(idx, len(cell)):
         expect_token(expected, cell, i)
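
For reference, the rewritten test_function can be replayed by hand. A minimal sketch, assuming an IPython checkout with this patch applied; only token_at_cursor comes from the library, and the positions simply restate the new assertions:

    from IPython.utils.tokenutil import token_at_cursor

    cell = "foo(a=5, b='10')"
    # with the cursor anywhere up to `foo(|a=`, inspection resolves to `foo`
    for pos in range(cell.find('a=') + 1):
        assert token_at_cursor(cell, pos) == 'foo'
    # just after either `=`, the keyword-argument lhs has been popped,
    # so the last inspectable name is again `foo`
    for pos in (cell.find('=') + 1, cell.rfind('=') + 1):
        assert token_at_cursor(cell, pos) == 'foo'
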
@@ -1,78 +1,80 @@
 """Token-related utilities"""
 
 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.
 
 from __future__ import absolute_import, print_function
 
 from collections import namedtuple
 from io import StringIO
 from keyword import iskeyword
 
 from . import tokenize2
 from .py3compat import cast_unicode_py2
 
 Token = namedtuple('Token', ['token', 'text', 'start', 'end', 'line'])
 
 def generate_tokens(readline):
     """wrap generate_tokens to catch EOF errors"""
     try:
         for token in tokenize2.generate_tokens(readline):
             yield token
     except tokenize2.TokenError:
         # catch EOF error
         return
 
 def token_at_cursor(cell, cursor_pos=0):
     """Get the token at a given cursor
 
     Used for introspection.
 
     Parameters
     ----------
 
     cell : unicode
         A block of Python code
     cursor_pos : int
         The location of the cursor in the block where the token should be found
     """
     cell = cast_unicode_py2(cell)
     names = []
     tokens = []
     offset = 0
     for tup in generate_tokens(StringIO(cell).readline):
 
         tok = Token(*tup)
 
         # token, text, start, end, line = tup
         start_col = tok.start[1]
         end_col = tok.end[1]
-        if offset + start_col > cursor_pos:
+        # allow '|foo' to find 'foo' at the beginning of a line
+        boundary = cursor_pos + 1 if start_col == 0 else cursor_pos
+        if offset + start_col >= boundary:
             # current token starts after the cursor,
             # don't consume it
             break
 
         if tok.token == tokenize2.NAME and not iskeyword(tok.text):
             if names and tokens and tokens[-1].token == tokenize2.OP and tokens[-1].text == '.':
                 names[-1] = "%s.%s" % (names[-1], tok.text)
             else:
                 names.append(tok.text)
         elif tok.token == tokenize2.OP:
             if tok.text == '=' and names:
                 # don't inspect the lhs of an assignment
                 names.pop(-1)
 
         if offset + end_col > cursor_pos:
             # we found the cursor, stop reading
             break
 
         tokens.append(tok)
         if tok.token == tokenize2.NEWLINE:
             offset += len(tok.line)
 
     if names:
         return names[-1]
     else:
         return ''
 
 
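
The substantive change is the cursor boundary: mid-line, a token that begins exactly at the cursor is now treated as starting after it, so `foo(|a=5)` inspects `foo` rather than `a`, while a token at column 0 can still be picked up by a cursor sitting right on it (`|foo`). A standalone sketch of just that predicate, with a hypothetical helper name that is not part of the library:

    def starts_after_cursor(offset, start_col, cursor_pos):
        """Hypothetical extraction of the new break condition."""
        # allow '|foo' to find 'foo' at the beginning of a line
        boundary = cursor_pos + 1 if start_col == 0 else cursor_pos
        return offset + start_col >= boundary

    # `foo(|a=5)`: `a` begins exactly at a mid-line cursor, so it is not
    # consumed and token_at_cursor falls back to the preceding `foo`
    assert starts_after_cursor(0, 4, 4)
    # `|foo` at column 0: the token under the cursor is still consumed
    assert not starts_after_cursor(0, 0, 0)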