Show More
1 | NO CONTENT: modified file |
|
NO CONTENT: modified file |
@@ -1,168 +1,190 b'' | |||||
1 | """prompt-toolkit utilities |
|
1 | """prompt-toolkit utilities | |
2 |
|
2 | |||
3 | Everything in this module is a private API, |
|
3 | Everything in this module is a private API, | |
4 | not to be used outside IPython. |
|
4 | not to be used outside IPython. | |
5 | """ |
|
5 | """ | |
6 |
|
6 | |||
7 | # Copyright (c) IPython Development Team. |
|
7 | # Copyright (c) IPython Development Team. | |
8 | # Distributed under the terms of the Modified BSD License. |
|
8 | # Distributed under the terms of the Modified BSD License. | |
9 |
|
9 | |||
10 | import unicodedata |
|
10 | import unicodedata | |
11 | from wcwidth import wcwidth |
|
11 | from wcwidth import wcwidth | |
12 |
|
12 | |||
13 | from IPython.core.completer import ( |
|
13 | from IPython.core.completer import ( | |
14 | provisionalcompleter, cursor_to_position, |
|
14 | provisionalcompleter, cursor_to_position, | |
15 | _deduplicate_completions) |
|
15 | _deduplicate_completions) | |
16 | from prompt_toolkit.completion import Completer, Completion |
|
16 | from prompt_toolkit.completion import Completer, Completion | |
17 | from prompt_toolkit.lexers import Lexer |
|
17 | from prompt_toolkit.lexers import Lexer | |
18 | from prompt_toolkit.lexers import PygmentsLexer |
|
18 | from prompt_toolkit.lexers import PygmentsLexer | |
19 | from prompt_toolkit.patch_stdout import patch_stdout |
|
19 | from prompt_toolkit.patch_stdout import patch_stdout | |
20 |
|
20 | |||
21 | import pygments.lexers as pygments_lexers |
|
21 | import pygments.lexers as pygments_lexers | |
22 | import os |
|
22 | import os | |
23 |
|
23 | |||
24 | _completion_sentinel = object() |
|
24 | _completion_sentinel = object() | |
25 |
|
25 | |||
26 | def _elide(string, *, min_elide=30): |
|
26 | def _elide_point(string, *, min_elide=30): | |
27 | """ |
|
27 | """ | |
28 | If a string is long enough, and has at least 3 dots, |
|
28 | If a string is long enough, and has at least 3 dots, | |
29 | replace the middle part with ellipses. |
|
29 | replace the middle part with ellipses. | |
30 |
|
30 | |||
31 | If a string naming a file is long enough, and has at least 3 slashes, |
|
31 | If a string naming a file is long enough, and has at least 3 slashes, | |
32 | replace the middle part with ellipses. |
|
32 | replace the middle part with ellipses. | |
33 |
|
33 | |||
34 | If three consecutive dots, or two consecutive dots are encountered these are |
|
34 | If three consecutive dots, or two consecutive dots are encountered these are | |
35 | replaced by the equivalents HORIZONTAL ELLIPSIS or TWO DOT LEADER unicode |
|
35 | replaced by the equivalents HORIZONTAL ELLIPSIS or TWO DOT LEADER unicode | |
36 | equivalents |
|
36 | equivalents | |
37 | """ |
|
37 | """ | |
38 | string = string.replace('...','\N{HORIZONTAL ELLIPSIS}') |
|
38 | string = string.replace('...','\N{HORIZONTAL ELLIPSIS}') | |
39 | string = string.replace('..','\N{TWO DOT LEADER}') |
|
39 | string = string.replace('..','\N{TWO DOT LEADER}') | |
40 | if len(string) < min_elide: |
|
40 | if len(string) < min_elide: | |
41 | return string |
|
41 | return string | |
42 |
|
42 | |||
43 | object_parts = string.split('.') |
|
43 | object_parts = string.split('.') | |
44 | file_parts = string.split(os.sep) |
|
44 | file_parts = string.split(os.sep) | |
45 | if file_parts[-1] == '': |
|
45 | if file_parts[-1] == '': | |
46 | file_parts.pop() |
|
46 | file_parts.pop() | |
47 |
|
47 | |||
48 | if len(object_parts) > 3: |
|
48 | if len(object_parts) > 3: | |
49 | return '{}.{}\N{HORIZONTAL ELLIPSIS}{}.{}'.format(object_parts[0], object_parts[1][0], object_parts[-2][-1], object_parts[-1]) |
|
49 | return '{}.{}\N{HORIZONTAL ELLIPSIS}{}.{}'.format(object_parts[0], object_parts[1][0], object_parts[-2][-1], object_parts[-1]) | |
50 |
|
50 | |||
51 | elif len(file_parts) > 3: |
|
51 | elif len(file_parts) > 3: | |
52 | return ('{}' + os.sep + '{}\N{HORIZONTAL ELLIPSIS}{}' + os.sep + '{}').format(file_parts[0], file_parts[1][0], file_parts[-2][-1], file_parts[-1]) |
|
52 | return ('{}' + os.sep + '{}\N{HORIZONTAL ELLIPSIS}{}' + os.sep + '{}').format(file_parts[0], file_parts[1][0], file_parts[-2][-1], file_parts[-1]) | |
53 |
|
53 | |||
54 | return string |
|
54 | return string | |
55 |
|
55 | |||
|
56 | def _elide_typed(string, typed, *, min_elide=30): | |||
|
57 | """ | |||
|
58 | Elide the middle of a long string if the beginning has already been typed. | |||
|
59 | """ | |||
|
60 | ||||
|
61 | if len(string) < min_elide: | |||
|
62 | return string | |||
|
63 | cut_how_much = len(typed)-3 | |||
|
64 | if string.startswith(typed) and len(string)> len(typed): | |||
|
65 | return f"{string[:3]}\N{HORIZONTAL ELLIPSIS}{string[cut_how_much:]}" | |||
|
66 | return string | |||
|
67 | ||||
|
def _elide(string, typed, min_elide=30):
    """
    Apply both elision strategies to a completion string for display.

    First collapses long dotted / path-like strings (``_elide_point``),
    then drops the middle of the portion the user has already typed
    (``_elide_typed``). Strings shorter than ``min_elide`` pass through
    unchanged.
    """
    shortened = _elide_point(string, min_elide=min_elide)
    return _elide_typed(shortened, typed, min_elide=min_elide)
|
72 | ||||
|
73 | ||||
56 |
|
74 | |||
57 | def _adjust_completion_text_based_on_context(text, body, offset): |
|
75 | def _adjust_completion_text_based_on_context(text, body, offset): | |
58 | if text.endswith('=') and len(body) > offset and body[offset] == '=': |
|
76 | if text.endswith('=') and len(body) > offset and body[offset] == '=': | |
59 | return text[:-1] |
|
77 | return text[:-1] | |
60 | else: |
|
78 | else: | |
61 | return text |
|
79 | return text | |
62 |
|
80 | |||
63 |
|
81 | |||
class IPythonPTCompleter(Completer):
    """Adaptor to provide IPython completions to prompt_toolkit"""

    def __init__(self, ipy_completer=None, shell=None):
        """
        Either ``ipy_completer`` (an IPython completer instance) or
        ``shell`` (an InteractiveShell, whose ``Completer`` will be
        used) must be supplied.

        Raises
        ------
        TypeError
            If neither argument is given.
        """
        if shell is None and ipy_completer is None:
            raise TypeError("Please pass shell=an InteractiveShell instance.")
        self._ipy_completer = ipy_completer
        self.shell = shell

    @property
    def ipy_completer(self):
        # Prefer an explicitly supplied completer; otherwise fall back to
        # the shell's completer.
        if self._ipy_completer:
            return self._ipy_completer
        else:
            return self.shell.Completer

    def get_completions(self, document, complete_event):
        """Yield prompt_toolkit ``Completion`` objects for *document*."""
        if not document.current_line.strip():
            return
        # Some bits of our completion system may print stuff (e.g. if a module
        # is imported). This context manager ensures that doesn't interfere with
        # the prompt.

        with patch_stdout(), provisionalcompleter():
            body = document.text
            cursor_row = document.cursor_position_row
            cursor_col = document.cursor_position_col
            cursor_position = document.cursor_position
            offset = cursor_to_position(body, cursor_row, cursor_col)
            try:
                yield from self._get_completions(body, offset, cursor_position, self.ipy_completer)
            except Exception as e:
                # A completion failure must never crash the prompt; show the
                # traceback instead. Bug fix: traceback.print_tb() takes a
                # traceback object, not an exception — passing ``e`` raised
                # AttributeError and hid the real error.
                from traceback import print_tb
                print_tb(e.__traceback__)

    @staticmethod
    def _get_completions(body, offset, cursor_position, ipyc):
        """
        Private equivalent of get_completions() use only for unit_testing.
        """
        completions = _deduplicate_completions(
            body, ipyc.completions(body, offset))
        for c in completions:
            if not c.text:
                # Guard against completion machinery giving us an empty string.
                continue
            text = unicodedata.normalize('NFC', c.text)
            # When the first character of the completion has a zero length,
            # then it's probably a decomposed unicode character. E.g. caused by
            # the "\dot" completion. Try to compose again with the previous
            # character.
            if wcwidth(text[0]) == 0:
                if cursor_position + c.start > 0:
                    char_before = body[c.start - 1]
                    fixed_text = unicodedata.normalize(
                        'NFC', char_before + text)

                    # Yield the modified completion instead, if this worked.
                    # Bug fix: check fixed_text, not text — text[0] is already
                    # known to be zero-width, so re-testing it could never
                    # succeed and this branch was unreachable.
                    if wcwidth(fixed_text[0:1]) == 1:
                        yield Completion(fixed_text, start_position=c.start - offset - 1)
                        continue

            # TODO: Use Jedi to determine meta_text
            # (Jedi currently has a bug that results in incorrect information.)
            # meta_text = ''
            # yield Completion(m, start_position=start_pos,
            #                  display_meta=meta_text)
            display_text = c.text

            adjusted_text = _adjust_completion_text_based_on_context(c.text, body, offset)
            if c.type == 'function':
                # Show "name()" for callables; elide against what the user
                # has already typed (body[c.start:c.end]).
                yield Completion(adjusted_text, start_position=c.start - offset, display=_elide(display_text+'()', body[c.start:c.end]), display_meta=c.type+c.signature)
            else:
                yield Completion(adjusted_text, start_position=c.start - offset, display=_elide(display_text, body[c.start:c.end]), display_meta=c.type)
134 |
|
156 | |||
class IPythonPTLexer(Lexer):
    """
    Wrapper around PythonLexer and BashLexer.

    Dispatches each document to a pygments-backed lexer chosen from the
    document's leading text: the shell lexer for ``!`` / ``%%bash``
    input, a magic-specific lexer for recognised cell magics, and the
    Python lexer otherwise.
    """

    def __init__(self):
        lexer_mod = pygments_lexers
        self.python_lexer = PygmentsLexer(lexer_mod.Python3Lexer)
        self.shell_lexer = PygmentsLexer(lexer_mod.BashLexer)

        # Cell-magic name -> pygments lexer class. Both spellings of the
        # HTML and javascript magics are listed so either form matches.
        magic_to_class = [
            ('HTML', lexer_mod.HtmlLexer),
            ('html', lexer_mod.HtmlLexer),
            ('javascript', lexer_mod.JavascriptLexer),
            ('js', lexer_mod.JavascriptLexer),
            ('perl', lexer_mod.PerlLexer),
            ('ruby', lexer_mod.RubyLexer),
            ('latex', lexer_mod.TexLexer),
        ]
        self.magic_lexers = {
            name: PygmentsLexer(cls) for name, cls in magic_to_class
        }

    def lex_document(self, document):
        """Select the lexer matching the document's first token and lex."""
        stripped = document.text.lstrip()

        chosen = self.python_lexer
        if stripped.startswith(('!', '%%bash')):
            chosen = self.shell_lexer
        elif stripped.startswith('%%'):
            for magic, magic_lexer in self.magic_lexers.items():
                if stripped.startswith('%%' + magic):
                    chosen = magic_lexer
                    break

        return chosen.lex_document(document)
General Comments 0
You need to be logged in to leave comments.
Login now