Show More
@@ -1,106 +1,110 b'' | |||||
1 | """prompt-toolkit utilities |
|
1 | """prompt-toolkit utilities | |
2 |
|
2 | |||
3 | Everything in this module is a private API, |
|
3 | Everything in this module is a private API, | |
4 | not to be used outside IPython. |
|
4 | not to be used outside IPython. | |
5 | """ |
|
5 | """ | |
6 |
|
6 | |||
7 | # Copyright (c) IPython Development Team. |
|
7 | # Copyright (c) IPython Development Team. | |
8 | # Distributed under the terms of the Modified BSD License. |
|
8 | # Distributed under the terms of the Modified BSD License. | |
9 |
|
9 | |||
10 | import unicodedata |
|
10 | import unicodedata | |
11 | from wcwidth import wcwidth |
|
11 | from wcwidth import wcwidth | |
12 |
|
12 | |||
13 | from IPython.core.completer import IPCompleter |
|
13 | from IPython.core.completer import IPCompleter | |
14 | from prompt_toolkit.completion import Completer, Completion |
|
14 | from prompt_toolkit.completion import Completer, Completion | |
15 | from prompt_toolkit.layout.lexers import Lexer |
|
15 | from prompt_toolkit.layout.lexers import Lexer | |
16 | from prompt_toolkit.layout.lexers import PygmentsLexer |
|
16 | from prompt_toolkit.layout.lexers import PygmentsLexer | |
17 |
|
17 | |||
18 | import pygments.lexers as pygments_lexers |
|
18 | import pygments.lexers as pygments_lexers | |
19 |
|
19 | |||
20 |
|
20 | |||
class IPythonPTCompleter(Completer):
    """Adaptor to provide IPython completions to prompt_toolkit"""

    def __init__(self, ipy_completer=None, shell=None):
        # At least one source of completions is required; without a shell we
        # cannot fall back to shell.Completer.
        if shell is None and ipy_completer is None:
            raise TypeError("Please pass shell=an InteractiveShell instance.")
        self._ipy_completer = ipy_completer
        self.shell = shell

    @property
    def ipy_completer(self):
        # An explicitly supplied completer takes precedence over the shell's.
        if self._ipy_completer:
            return self._ipy_completer
        return self.shell.Completer

    def get_completions(self, document, complete_event):
        # Nothing to complete on a blank line.
        if not document.current_line.strip():
            return

        # Some bits of our completion system may print stuff (e.g. if a module
        # is imported). This context manager ensures that doesn't interfere with
        # the prompt.
        with self.shell.pt_cli.patch_stdout_context():
            used, matches = self.ipy_completer.complete(
                line_buffer=document.current_line,
                cursor_pos=document.cursor_position_col
            )
        start_pos = -len(used)

        for match in matches:
            # Guard against completion machinery giving us an empty string.
            if not match:
                continue

            match = unicodedata.normalize('NFC', match)

            # When the first character of the completion has a zero length,
            # then it's probably a decomposed unicode character. E.g. caused by
            # the "\dot" completion. Try to compose again with the previous
            # character.
            if wcwidth(match[0]) == 0:
                if document.cursor_position + start_pos > 0:
                    char_before = document.text[document.cursor_position + start_pos - 1]
                    match = unicodedata.normalize('NFC', char_before + match)

                # Yield the modified completion instead, if this worked.
                if wcwidth(match[0:1]) == 1:
                    yield Completion(match, start_position=start_pos - 1)
                    continue

            # TODO: Use Jedi to determine meta_text
            # (Jedi currently has a bug that results in incorrect information.)
            # meta_text = ''
            # yield Completion(m, start_position=start_pos,
            #                  display_meta=meta_text)
            yield Completion(match, start_position=start_pos)
72 |
|
76 | |||
class IPythonPTLexer(Lexer):
    """
    Wrapper around PythonLexer and BashLexer.
    """

    def __init__(self):
        lexers = pygments_lexers
        self.python_lexer = PygmentsLexer(lexers.Python3Lexer)
        self.shell_lexer = PygmentsLexer(lexers.BashLexer)

        # Lexers keyed by the cell-magic name that selects them.
        self.magic_lexers = {
            'HTML': PygmentsLexer(lexers.HtmlLexer),
            'html': PygmentsLexer(lexers.HtmlLexer),
            'javascript': PygmentsLexer(lexers.JavascriptLexer),
            'js': PygmentsLexer(lexers.JavascriptLexer),
            'perl': PygmentsLexer(lexers.PerlLexer),
            'ruby': PygmentsLexer(lexers.RubyLexer),
            'latex': PygmentsLexer(lexers.TexLexer),
        }

    def lex_document(self, cli, document):
        text = document.text.lstrip()

        # Default: highlight as Python.
        lexer = self.python_lexer

        # Shell escapes and %%bash cells are highlighted as shell script.
        if text.startswith('!') or text.startswith('%%bash'):
            lexer = self.shell_lexer

        # Other cell magics may map to a dedicated lexer.
        elif text.startswith('%%'):
            for magic_name, magic_lexer in self.magic_lexers.items():
                if text.startswith('%%' + magic_name):
                    lexer = magic_lexer
                    break

        return lexer.lex_document(cli, document)
General Comments 0
You need to be logged in to leave comments.
Login now