Show More
@@ -1,57 +1,74 b'' | |||||
1 | # System library imports |
|
1 | # System library imports | |
2 | from pygments.token import Token, is_token_subtype |
|
2 | from pygments.token import Token, is_token_subtype | |
3 |
|
3 | |||
4 |
|
4 | |||
class CompletionLexer(object):
    """ Uses Pygments and some auxiliary information to lex code snippets for
        symbol contexts.
    """

    # Maps Lexer names to a list of possible name separators.
    separator_map = { 'C' : [ '.', '->' ],
                      'C++' : [ '.', '->', '::' ],
                      'Python' : [ '.' ] }

    def __init__(self, lexer):
        """ Create a CompletionLexer using the specified Pygments lexer.
        """
        self.lexer = lexer

    def get_context(self, string):
        """ Assuming the cursor is at the end of the specified string, get the
            context (a list of names) for the symbol at cursor position.

            Returns a list of name components, e.g. 'foo.bar.baz' ->
            ['foo', 'bar', 'baz'] and 'foo.bar.' -> ['foo', 'bar', ''].
        """
        context = []
        # Walk the token stream from the cursor backwards.
        reversed_tokens = list(self._lexer.get_tokens(string))
        reversed_tokens.reverse()

        # Pygments often tacks on a newline when none is specified in the
        # input. Remove this newline.
        if reversed_tokens and reversed_tokens[0][1].endswith('\n') and \
                not string.endswith('\n'):
            reversed_tokens.pop(0)

        # u'' replaces the Python 2-only unicode() call so that this code
        # also runs on Python 3.3+; behavior is identical on Python 2.
        current_op = u''
        for token, text in reversed_tokens:

            if is_token_subtype(token, Token.Name):

                # Handle a trailing separator, e.g. 'foo.bar.'
                if current_op in self._name_separators:
                    if not context:
                        context.insert(0, u'')

                # Handle non-separator operators and punctuation.
                elif current_op:
                    break

                context.insert(0, text)
                current_op = u''

            # Pygments doesn't understand that, e.g., '->' is a single
            # operator in C++. This is why we have to build up an operator
            # from potentially several tokens.
            elif token is Token.Operator or token is Token.Punctuation:
                current_op = text + current_op

            # Break on anything that is not an Operator, Punctuation, or
            # Name.
            else:
                break

        return context

    def get_lexer(self, lexer=None):
        """ Return the current Pygments lexer.

            The 'lexer' argument is ignored; it defaults to None so this
            method is usable as a property fget (properties call the getter
            with only 'self' -- the original required positional argument
            made every read of the 'lexer' property raise TypeError).
        """
        return self._lexer

    def set_lexer(self, lexer, name_separators=None):
        """ Set the Pygments lexer.

            If 'name_separators' is not given, the separators are looked up
            by lexer name in 'separator_map', defaulting to ['.'].
        """
        self._lexer = lexer
        if name_separators is None:
            self._name_separators = self.separator_map.get(lexer.name, ['.'])
        else:
            self._name_separators = list(name_separators)

    lexer = property(get_lexer, set_lexer)
@@ -1,38 +1,47 b'' | |||||
1 | # Standard library imports |
|
1 | # Standard library imports | |
2 | import unittest |
|
2 | import unittest | |
3 |
|
3 | |||
4 | # System library imports |
|
4 | # System library imports | |
5 | from pygments.lexers import CLexer, CppLexer, PythonLexer |
|
5 | from pygments.lexers import CLexer, CppLexer, PythonLexer | |
6 |
|
6 | |||
7 | # Local imports |
|
7 | # Local imports | |
8 | from IPython.frontend.qt.console.completion_lexer import CompletionLexer |
|
8 | from IPython.frontend.qt.console.completion_lexer import CompletionLexer | |
9 |
|
9 | |||
10 |
|
10 | |||
class TestCompletionLexer(unittest.TestCase):
    """ Tests that CompletionLexer extracts symbol contexts correctly for
        several Pygments lexers.
    """

    def testPython(self):
        """ Does the CompletionLexer work for Python?
        """
        lexer = CompletionLexer(PythonLexer())

        # Test simplest case.
        # NOTE: assertEquals is a deprecated alias; use assertEqual.
        self.assertEqual(lexer.get_context("foo.bar.baz"),
                         [ "foo", "bar", "baz" ])

        # Test trailing period.
        self.assertEqual(lexer.get_context("foo.bar."), [ "foo", "bar", "" ])

        # Test with prompt present.
        self.assertEqual(lexer.get_context(">>> foo.bar.baz"),
                         [ "foo", "bar", "baz" ])

        # Test spacing in name.
        self.assertEqual(lexer.get_context("foo.bar. baz"), [ "baz" ])

        # Test parenthesis.
        self.assertEqual(lexer.get_context("foo("), [])

    def testC(self):
        """ Does the CompletionLexer work for C/C++?
        """
        lexer = CompletionLexer(CLexer())
        self.assertEqual(lexer.get_context("foo.bar"), [ "foo", "bar" ])
        self.assertEqual(lexer.get_context("foo->bar"), [ "foo", "bar" ])

        lexer = CompletionLexer(CppLexer())
        self.assertEqual(lexer.get_context("Foo::Bar"), [ "Foo", "Bar" ])
36 |
|
45 | |||
37 | if __name__ == '__main__': |
|
46 | if __name__ == '__main__': | |
38 | unittest.main() |
|
47 | unittest.main() |
General Comments 0
You need to be logged in to leave comments.
Login now