prioritize function token for inspection...
Min RK
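For orientation, a minimal sketch of the behavior this commit introduces, mirroring the updated tests below (the cursor position is computed the same way the tests compute it):

    from IPython.utils.tokenutil import token_at_cursor

    cell = "foo(a=5, b='10')"
    # with the cursor anywhere inside the argument list, the callable
    # itself is returned for inspection, not the argument under the cursor
    assert token_at_cursor(cell, cell.find('b=')) == 'foo'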
IPython/utils/tests/test_tokenutil.py
@@ -1,75 +1,90 @@
1 1 """Tests for tokenutil"""
2 2 # Copyright (c) IPython Development Team.
3 3 # Distributed under the terms of the Modified BSD License.
4 4
5 5 import nose.tools as nt
6 6
7 7 from IPython.utils.tokenutil import token_at_cursor, line_at_cursor
8 8
9 9 def expect_token(expected, cell, cursor_pos):
10 10 token = token_at_cursor(cell, cursor_pos)
11 11 offset = 0
12 12 for line in cell.splitlines():
13 13 if offset + len(line) >= cursor_pos:
14 14 break
15 15 else:
16 16 offset += len(line) + 1  # +1 for the newline splitlines() strips
17 17 column = cursor_pos - offset
18 18 line_with_cursor = '%s|%s' % (line[:column], line[column:])
19 19 nt.assert_equal(token, expected,
20 20 "Expected %r, got %r in: %r (pos %i)" % (
21 21 expected, token, line_with_cursor, cursor_pos)
22 22 )
23 23
24 24 def test_simple():
25 25 cell = "foo"
26 26 for i in range(len(cell)):
27 27 expect_token("foo", cell, i)
28 28
29 29 def test_function():
30 30 cell = "foo(a=5, b='10')"
31 31 expected = 'foo'
32 32 # up to `foo(|a=`
33 33 for i in range(cell.find('a=') + 1):
34 34 expect_token("foo", cell, i)
35 35 # find foo after `=`
36 36 for i in [cell.find('=') + 1, cell.rfind('=') + 1]:
37 37 expect_token("foo", cell, i)
38 38 # in between `5,|` and `|b=`
39 39 for i in range(cell.find(','), cell.find('b=')):
40 40 expect_token("foo", cell, i)
41 41
42 42 def test_multiline():
43 43 cell = '\n'.join([
44 44 'a = 5',
45 45 'b = hello("string", there)'
46 46 ])
47 47 expected = 'hello'
48 48 start = cell.index(expected) + 1
49 49 for i in range(start, start + len(expected)):
50 50 expect_token(expected, cell, i)
51 expected = 'there'
51 expected = 'hello'
52 52 start = cell.index('there') + 1  # cursor on 'there' should still inspect the call
53 53 for i in range(start, start + len('there')):
54 54 expect_token(expected, cell, i)
55 55
56 def test_nested_call():
57 cell = "foo(bar(a=5), b=10)"
58 expected = 'foo'
59 start = cell.index('bar') + 1
60 for i in range(start, start + 3):
61 expect_token(expected, cell, i)
62 expected = 'bar'
63 start = cell.index('a=')
64 for i in range(start, start + 3):
65 expect_token(expected, cell, i)
66 expected = 'foo'
67 start = cell.index(')') + 1
68 for i in range(start, len(cell)-1):
69 expect_token(expected, cell, i)
70
56 71 def test_attrs():
57 cell = "foo(a=obj.attr.subattr)"
72 cell = "a = obj.attr.subattr"
58 73 expected = 'obj'
59 74 idx = cell.find('obj') + 1
60 75 for i in range(idx, idx + 3):
61 76 expect_token(expected, cell, i)
62 77 idx = cell.find('.attr') + 2
63 78 expected = 'obj.attr'
64 79 for i in range(idx, idx + 4):
65 80 expect_token(expected, cell, i)
66 81 idx = cell.find('.subattr') + 2
67 82 expected = 'obj.attr.subattr'
68 83 for i in range(idx, len(cell)):
69 84 expect_token(expected, cell, i)
70 85
71 86 def test_line_at_cursor():
72 87 cell = ""
73 88 (line, offset) = line_at_cursor(cell, cursor_pos=11)
74 89 assert line == "", ("Expected '', got %r" % line)
75 90 assert offset == 0, ("Expected 0, got %r" % offset)
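Before the second hunk, a quick sketch of what the new test_nested_call exercises; these assertions restate the test's expectations directly:

    from IPython.utils.tokenutil import token_at_cursor

    cell = "foo(bar(a=5), b=10)"
    # inside the inner argument list, the innermost call wins
    assert token_at_cursor(cell, cell.index('a=')) == 'bar'
    # once the inner call closes, the outer callable is inspected again
    assert token_at_cursor(cell, cell.index(')') + 1) == 'foo'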
IPython/utils/tokenutil.py
@@ -1,110 +1,121 @@
1 1 """Token-related utilities"""
2 2
3 3 # Copyright (c) IPython Development Team.
4 4 # Distributed under the terms of the Modified BSD License.
5 5
6 6 from __future__ import absolute_import, print_function
7 7
8 8 from collections import namedtuple
9 9 from io import StringIO
10 10 from keyword import iskeyword
11 11
12 12 from . import tokenize2
13 13 from .py3compat import cast_unicode_py2
14 14
15 15 Token = namedtuple('Token', ['token', 'text', 'start', 'end', 'line'])
16 16
17 17 def generate_tokens(readline):
18 18 """wrap generate_tokens to catch EOF errors"""
19 19 try:
20 20 for token in tokenize2.generate_tokens(readline):
21 21 yield token
22 22 except tokenize2.TokenError:
23 23 # catch EOF error
24 24 return
25 25
26 26 def line_at_cursor(cell, cursor_pos=0):
27 27 """Return the line in a cell at a given cursor position
28 28
29 29 Used for calling line-based APIs that don't yet support multi-line input.
30 30
31 31 Parameters
32 32 ----------
33 33
34 34 cell: text
35 35 multiline block of text
36 36 cursor_pos: integer
37 37 the cursor position
38 38
39 39 Returns
40 40 -------
41 41
42 42 (line, offset): (text, integer)
43 43 The line with the current cursor, and the character offset of the start of the line.
44 44 """
45 45 offset = 0
46 46 lines = cell.splitlines(True)
47 47 for line in lines:
48 48 next_offset = offset + len(line)
49 49 if next_offset >= cursor_pos:
50 50 break
51 51 offset = next_offset
52 52 else:
53 53 line = ""
54 54 return (line, offset)
55 55
56 56 def token_at_cursor(cell, cursor_pos=0):
57 57 """Get the token at a given cursor
58 58
59 59 Used for introspection.
60 60
61 Function calls are prioritized, so the token for the callable will be returned
62 if the cursor is anywhere inside the call.
63
61 64 Parameters
62 65 ----------
63 66
64 67 cell : unicode
65 68 A block of Python code
66 69 cursor_pos : int
67 70 The location of the cursor in the block where the token should be found
68 71 """
69 72 cell = cast_unicode_py2(cell)
70 73 names = []
71 74 tokens = []
72 75 offset = 0
76 call_names = []
73 77 for tup in generate_tokens(StringIO(cell).readline):
74 78
75 79 tok = Token(*tup)
76 80
77 81 # token, text, start, end, line = tup
78 82 start_col = tok.start[1]
79 83 end_col = tok.end[1]
80 84 # allow '|foo' to find 'foo' at the beginning of a line
81 85 boundary = cursor_pos + 1 if start_col == 0 else cursor_pos
82 86 if offset + start_col >= boundary:
83 87 # current token starts after the cursor,
84 88 # don't consume it
85 89 break
86 90
87 91 if tok.token == tokenize2.NAME and not iskeyword(tok.text):
88 92 if names and tokens and tokens[-1].token == tokenize2.OP and tokens[-1].text == '.':
89 93 names[-1] = "%s.%s" % (names[-1], tok.text)
90 94 else:
91 95 names.append(tok.text)
92 96 elif tok.token == tokenize2.OP:
93 97 if tok.text == '=' and names:
94 98 # don't inspect the lhs of an assignment
95 99 names.pop(-1)
100 if tok.text == '(' and names:
101 # if we are inside a function call, inspect the function
102 call_names.append(names[-1])
103 elif tok.text == ')' and call_names:
104 call_names.pop(-1)
96 105
97 106 if offset + end_col > cursor_pos:
98 107 # we found the cursor, stop reading
99 108 break
100 109
101 110 tokens.append(tok)
102 111 if tok.token == tokenize2.NEWLINE:
103 112 offset += len(tok.line)
104 113
105 if names:
114 if call_names:
115 return call_names[-1]
116 elif names:
106 117 return names[-1]
107 118 else:
108 119 return ''
109 120
110 121
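For completeness, a small usage sketch of line_at_cursor, which this commit leaves unchanged (the cell contents here are illustrative, not taken from the diff):

    from IPython.utils.tokenutil import line_at_cursor

    cell = "a = 5\nb = hello('hi')"
    # cursor position 8 falls on the second line, which starts at offset 6
    line, offset = line_at_cursor(cell, cursor_pos=8)
    assert line == "b = hello('hi')"
    assert offset == 6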