##// END OF EJS Templates
don't pick up tokens right of cursor...
MinRK -
Show More
@@ -1,63 +1,70 b''
1 1 """Tests for tokenutil"""
2 2 # Copyright (c) IPython Development Team.
3 3 # Distributed under the terms of the Modified BSD License.
4 4
5 5 import nose.tools as nt
6 6
7 7 from IPython.utils.tokenutil import token_at_cursor
8 8
def expect_token(expected, cell, cursor_pos):
    """Assert that ``token_at_cursor(cell, cursor_pos)`` returns *expected*.

    On failure, the error message shows the cursor position inside the
    relevant source line as ``'before|after'`` to make the mismatch easy
    to read.

    Parameters
    ----------
    expected : str
        The token the cursor position should resolve to.
    cell : str
        The block of code under test.
    cursor_pos : int
        Offset of the cursor within *cell* (newlines included).
    """
    token = token_at_cursor(cell, cursor_pos)
    # Locate the line containing the cursor so we can render it.
    offset = 0
    for line in cell.splitlines():
        if offset + len(line) >= cursor_pos:
            break
        else:
            # +1 accounts for the '\n' that splitlines() strips; without it
            # the rendered cursor drifts one column left per preceding line.
            offset += len(line) + 1
    column = cursor_pos - offset
    line_with_cursor = '%s|%s' % (line[:column], line[column:])
    nt.assert_equal(token, expected,
        "Expected %r, got %r in: %r (pos %i)" % (
            expected, token, line_with_cursor, cursor_pos)
    )
24 24
def test_simple():
    """A bare name resolves to itself at every cursor position."""
    cell = "foo"
    for pos, _ in enumerate(cell):
        expect_token("foo", cell, pos)
29 29
def test_function():
    """Inside a call's argument list, the function name is the token.

    Keyword-argument names and values must not shadow the callee:
    introspection on ``foo(a=5, b='10')`` should report ``foo``.
    """
    cell = "foo(a=5, b='10')"
    # up to `foo(|a=`
    for i in range(cell.find('a=') + 1):
        expect_token("foo", cell, i)
    # find foo after `=`
    for i in [cell.find('=') + 1, cell.rfind('=') + 1]:
        expect_token("foo", cell, i)
    # in between `5,|` and `|b=`
    for i in range(cell.find(','), cell.find('b=')):
        expect_token("foo", cell, i)
35 42
def test_multiline():
    """Names on a later line are found despite the preceding lines."""
    cell = '\n'.join([
        'a = 5',
        'b = hello("string", there)'
    ])
    # Each name should be returned for every cursor position strictly
    # inside it (positions index+1 .. index+len).
    for name in ('hello', 'there'):
        first = cell.index(name) + 1
        for pos in range(first, first + len(name)):
            expect_token(name, cell, pos)
49 56
def test_attrs():
    """A dotted chain resolves to the chain up to the cursor's attribute."""
    cell = "foo(a=obj.attr.subattr)"
    start = cell.find('obj') + 1
    for pos in range(start, start + 3):
        expect_token('obj', cell, pos)
    start = cell.find('.attr') + 2
    for pos in range(start, start + 4):
        expect_token('obj.attr', cell, pos)
    start = cell.find('.subattr') + 2
    for pos in range(start, len(cell)):
        expect_token('obj.attr.subattr', cell, pos)
@@ -1,78 +1,80 b''
1 1 """Token-related utilities"""
2 2
3 3 # Copyright (c) IPython Development Team.
4 4 # Distributed under the terms of the Modified BSD License.
5 5
6 6 from __future__ import absolute_import, print_function
7 7
8 8 from collections import namedtuple
9 9 from io import StringIO
10 10 from keyword import iskeyword
11 11
12 12 from . import tokenize2
13 13 from .py3compat import cast_unicode_py2
14 14
15 15 Token = namedtuple('Token', ['token', 'text', 'start', 'end', 'line'])
16 16
def generate_tokens(readline):
    """Yield tokens from ``tokenize2.generate_tokens``, stopping quietly
    if the tokenizer hits an unexpected EOF (e.g. an unclosed bracket)."""
    token_stream = tokenize2.generate_tokens(readline)
    while True:
        try:
            tok = next(token_stream)
        except StopIteration:
            return
        except tokenize2.TokenError:
            # incomplete input — treat EOF-in-expression as end of stream
            return
        yield tok
25 25
def token_at_cursor(cell, cursor_pos=0):
    """Get the token at a given cursor

    Used for introspection.

    Parameters
    ----------

    cell : unicode
        A block of Python code
    cursor_pos : int
        The location of the cursor in the block where the token should be found

    Returns
    -------
    unicode
        The (possibly dotted) name at the cursor, or '' if none was found.
    """
    cell = cast_unicode_py2(cell)
    names = []   # candidate names seen so far; last one wins
    tokens = []  # raw tokens consumed so far (used to detect a preceding '.')
    offset = 0   # chars consumed on lines before the current one
    for tup in generate_tokens(StringIO(cell).readline):

        tok = Token(*tup)

        # token, text, start, end, line = tup
        start_col = tok.start[1]
        end_col = tok.end[1]
        # allow '|foo' to find 'foo' at the beginning of a line
        boundary = cursor_pos + 1 if start_col == 0 else cursor_pos
        if offset + start_col >= boundary:
            # current token starts after the cursor,
            # don't consume it
            break

        if tok.token == tokenize2.NAME and not iskeyword(tok.text):
            # extend a dotted chain if the previous token was '.',
            # otherwise start a fresh candidate name
            if names and tokens and tokens[-1].token == tokenize2.OP and tokens[-1].text == '.':
                names[-1] = "%s.%s" % (names[-1], tok.text)
            else:
                names.append(tok.text)
        elif tok.token == tokenize2.OP:
            if tok.text == '=' and names:
                # don't inspect the lhs of an assignment
                names.pop(-1)

        if offset + end_col > cursor_pos:
            # we found the cursor, stop reading
            break

        tokens.append(tok)
        if tok.token == tokenize2.NEWLINE:
            # tok.line is the full source line; advance the column offset
            # so columns on the next line map to absolute cell positions
            offset += len(tok.line)

    if names:
        return names[-1]
    else:
        return ''
77 79
78 80
General Comments 0
You need to be logged in to leave comments. Login now