@@ -1,197 +1,204 @@
 """prompt-toolkit utilities
 
 Everything in this module is a private API,
 not to be used outside IPython.
 """
 
 # Copyright (c) IPython Development Team.
 # Distributed under the terms of the Modified BSD License.
 
 import unicodedata
 from wcwidth import wcwidth
 
 from IPython.core.completer import (
     provisionalcompleter, cursor_to_position,
     _deduplicate_completions)
 from prompt_toolkit.completion import Completer, Completion
 from prompt_toolkit.lexers import Lexer
 from prompt_toolkit.lexers import PygmentsLexer
 from prompt_toolkit.patch_stdout import patch_stdout
 
 import pygments.lexers as pygments_lexers
 import os
 import sys
 import traceback
 
 _completion_sentinel = object()
 
 def _elide_point(string:str, *, min_elide=30)->str:
     """
     If a string is long enough, and has at least 3 dots,
     replace the middle part with ellipses.
 
     If a string naming a file is long enough, and has at least 3 slashes,
     replace the middle part with ellipses.
 
     If three consecutive dots, or two consecutive dots are encountered these are
     replaced by the equivalents HORIZONTAL ELLIPSIS or TWO DOT LEADER unicode
     equivalents
     """
     string = string.replace('...','\N{HORIZONTAL ELLIPSIS}')
     string = string.replace('..','\N{TWO DOT LEADER}')
     if len(string) < min_elide:
         return string
 
     object_parts = string.split('.')
     file_parts = string.split(os.sep)
     if file_parts[-1] == '':
         file_parts.pop()
 
     if len(object_parts) > 3:
-        return '{}.{}\N{HORIZONTAL ELLIPSIS}{}.{}'.format(object_parts[0], object_parts[1][:1], object_parts[-2][-1:], object_parts[-1])
+        return "{}.{}\N{HORIZONTAL ELLIPSIS}{}.{}".format(
+            object_parts[0],
+            object_parts[1][:1],
+            object_parts[-2][-1:],
+            object_parts[-1],
+        )
 
     elif len(file_parts) > 3:
-        return ('{}' + os.sep + '{}\N{HORIZONTAL ELLIPSIS}{}' + os.sep + '{}').format(file_parts[0], file_parts[1][:1], file_parts[-2][-1:], file_parts[-1])
+        return ("{}" + os.sep + "{}\N{HORIZONTAL ELLIPSIS}{}" + os.sep + "{}").format(
+            file_parts[0], file_parts[1][:1], file_parts[-2][-1:], file_parts[-1]
+        )
 
     return string
 
 def _elide_typed(string:str, typed:str, *, min_elide:int=30)->str:
     """
     Elide the middle of a long string if the beginning has already been typed.
     """
 
     if len(string) < min_elide:
         return string
     cut_how_much = len(typed)-3
     if cut_how_much < 7:
         return string
     if string.startswith(typed) and len(string)> len(typed):
         return f"{string[:3]}\N{HORIZONTAL ELLIPSIS}{string[cut_how_much:]}"
     return string
 
 def _elide(string:str, typed:str, min_elide=30)->str:
     return _elide_typed(
         _elide_point(string, min_elide=min_elide),
         typed, min_elide=min_elide)
 
 
 
 def _adjust_completion_text_based_on_context(text, body, offset):
     if text.endswith('=') and len(body) > offset and body[offset] == '=':
         return text[:-1]
     else:
         return text
 
 
 class IPythonPTCompleter(Completer):
     """Adaptor to provide IPython completions to prompt_toolkit"""
     def __init__(self, ipy_completer=None, shell=None):
         if shell is None and ipy_completer is None:
             raise TypeError("Please pass shell=an InteractiveShell instance.")
         self._ipy_completer = ipy_completer
         self.shell = shell
 
     @property
     def ipy_completer(self):
         if self._ipy_completer:
             return self._ipy_completer
         else:
             return self.shell.Completer
 
     def get_completions(self, document, complete_event):
         if not document.current_line.strip():
             return
         # Some bits of our completion system may print stuff (e.g. if a module
         # is imported). This context manager ensures that doesn't interfere with
         # the prompt.
 
         with patch_stdout(), provisionalcompleter():
             body = document.text
             cursor_row = document.cursor_position_row
             cursor_col = document.cursor_position_col
             cursor_position = document.cursor_position
             offset = cursor_to_position(body, cursor_row, cursor_col)
             try:
                 yield from self._get_completions(body, offset, cursor_position, self.ipy_completer)
             except Exception as e:
                 try:
                     exc_type, exc_value, exc_tb = sys.exc_info()
                     traceback.print_exception(exc_type, exc_value, exc_tb)
                 except AttributeError:
                     print('Unrecoverable Error in completions')
 
     @staticmethod
     def _get_completions(body, offset, cursor_position, ipyc):
         """
         Private equivalent of get_completions() use only for unit_testing.
         """
         debug = getattr(ipyc, 'debug', False)
         completions = _deduplicate_completions(
             body, ipyc.completions(body, offset))
         for c in completions:
             if not c.text:
                 # Guard against completion machinery giving us an empty string.
                 continue
             text = unicodedata.normalize('NFC', c.text)
             # When the first character of the completion has a zero length,
             # then it's probably a decomposed unicode character. E.g. caused by
             # the "\dot" completion. Try to compose again with the previous
             # character.
             if wcwidth(text[0]) == 0:
                 if cursor_position + c.start > 0:
                     char_before = body[c.start - 1]
                     fixed_text = unicodedata.normalize(
                         'NFC', char_before + text)
 
                     # Yield the modified completion instead, if this worked.
                     if wcwidth(text[0:1]) == 1:
                         yield Completion(fixed_text, start_position=c.start - offset - 1)
                         continue
 
             # TODO: Use Jedi to determine meta_text
             # (Jedi currently has a bug that results in incorrect information.)
             # meta_text = ''
             # yield Completion(m, start_position=start_pos,
             #                  display_meta=meta_text)
             display_text = c.text
 
             adjusted_text = _adjust_completion_text_based_on_context(c.text, body, offset)
             if c.type == 'function':
                 yield Completion(adjusted_text, start_position=c.start - offset, display=_elide(display_text+'()', body[c.start:c.end]), display_meta=c.type+c.signature)
             else:
                 yield Completion(adjusted_text, start_position=c.start - offset, display=_elide(display_text, body[c.start:c.end]), display_meta=c.type)
 
 class IPythonPTLexer(Lexer):
     """
     Wrapper around PythonLexer and BashLexer.
     """
     def __init__(self):
         l = pygments_lexers
         self.python_lexer = PygmentsLexer(l.Python3Lexer)
         self.shell_lexer = PygmentsLexer(l.BashLexer)
 
         self.magic_lexers = {
             'HTML': PygmentsLexer(l.HtmlLexer),
             'html': PygmentsLexer(l.HtmlLexer),
             'javascript': PygmentsLexer(l.JavascriptLexer),
             'js': PygmentsLexer(l.JavascriptLexer),
             'perl': PygmentsLexer(l.PerlLexer),
             'ruby': PygmentsLexer(l.RubyLexer),
             'latex': PygmentsLexer(l.TexLexer),
         }
 
     def lex_document(self, document):
         text = document.text.lstrip()
 
         lexer = self.python_lexer
 
         if text.startswith('!') or text.startswith('%%bash'):
             lexer = self.shell_lexer
 
         elif text.startswith('%%'):
             for magic, l in self.magic_lexers.items():
                 if text.startswith('%%' + magic):
                     lexer = l
                     break
 
         return lexer.lex_document(document)
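
A minimal usage sketch of the elision helpers reformatted in this diff. It assumes the module is importable as IPython.terminal.ptutils (the file path is not shown in the diff), and the sample strings are invented for illustration; behavior follows from the function bodies above.

# Sketch only: import path assumed, sample inputs invented.
from IPython.terminal.ptutils import _elide_point, _elide_typed, _elide

# A dotted name with more than 3 parts and at least 30 characters:
# the middle is collapsed around HORIZONTAL ELLIPSIS, keeping the first
# and last segments plus one character from each neighbour.
name = "package.submodule.another_module.attribute"
print(_elide_point(name))               # package.s…e.attribute

# A completion whose typed prefix is already long: the first 3 characters
# are kept and most of the already-typed prefix is dropped.
typed = "a_rather_long_prefix_"
completion = typed + "remaining_suffix"
print(_elide_typed(completion, typed))  # a_r…ix_remaining_suffix

# _elide() simply chains the two strategies in order.
print(_elide(completion, typed))        # a_r…ix_remaining_suffix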