fix type_extensions
M Bussonnier -
@@ -1,3377 +1,3379
1 """Completion for IPython.
1 """Completion for IPython.
2
2
3 This module started as fork of the rlcompleter module in the Python standard
3 This module started as fork of the rlcompleter module in the Python standard
4 library. The original enhancements made to rlcompleter have been sent
4 library. The original enhancements made to rlcompleter have been sent
5 upstream and were accepted as of Python 2.3,
5 upstream and were accepted as of Python 2.3,
6
6
7 This module now supports a wide variety of completion mechanisms, both for
8 normal classic Python code and for completers for IPython-specific syntax
9 such as magics.
10
10
11 Latex and Unicode completion
11 Latex and Unicode completion
12 ============================
12 ============================
13
13
14 IPython and compatible frontends can not only complete your code, but can also
15 help you input a wide range of characters. In particular, we allow you to insert
16 a unicode character using the tab completion mechanism.
17
17
18 Forward latex/unicode completion
18 Forward latex/unicode completion
19 --------------------------------
19 --------------------------------
20
20
21 Forward completion allows you to easily type a unicode character using its latex
22 name, or unicode long description. To do so, type a backslash followed by the
23 relevant name and press tab:
24
24
25
25
26 Using latex completion:
26 Using latex completion:
27
27
28 .. code::
28 .. code::
29
29
30 \\alpha<tab>
30 \\alpha<tab>
31 α
32
32
33 or using unicode completion:
33 or using unicode completion:
34
34
35
35
36 .. code::
36 .. code::
37
37
38 \\GREEK SMALL LETTER ALPHA<tab>
38 \\GREEK SMALL LETTER ALPHA<tab>
39 α
40
40
41
41
42 Only valid Python identifiers will complete. Combining characters (like arrows or
43 dots) are also available; unlike latex, they need to be put after their
44 counterpart, that is to say, ``F\\\\vec<tab>`` is correct, not ``\\\\vec<tab>F``.
45
45
46 Some browsers are known to display combining characters incorrectly.
46 Some browsers are known to display combining characters incorrectly.
47
47
48 Backward latex completion
48 Backward latex completion
49 -------------------------
49 -------------------------
50
50
51 It is sometimes challenging to know how to type a character; if you are using
52 IPython, or any compatible frontend, you can prepend a backslash to the character
53 and press :kbd:`Tab` to expand it to its latex form.
54
54
55 .. code::
55 .. code::
56
56
57 \\α<tab>
58 \\alpha
58 \\alpha
59
59
60
60
61 Both forward and backward completions can be deactivated by setting the
61 Both forward and backward completions can be deactivated by setting the
62 :std:configtrait:`Completer.backslash_combining_completions` option to
62 :std:configtrait:`Completer.backslash_combining_completions` option to
63 ``False``.
63 ``False``.
64
64
65
65
66 Experimental
66 Experimental
67 ============
67 ============
68
68
69 Starting with IPython 6.0, this module can make use of the Jedi library to
70 generate completions, both using static analysis of the code and by dynamically
71 inspecting multiple namespaces. Jedi is an autocompletion and static analysis
72 library for Python. The APIs attached to this new mechanism are unstable and will
73 raise unless used in a :any:`provisionalcompleter` context manager.
74
74
75 You will find that the following are experimental:
75 You will find that the following are experimental:
76
76
77 - :any:`provisionalcompleter`
77 - :any:`provisionalcompleter`
78 - :any:`IPCompleter.completions`
78 - :any:`IPCompleter.completions`
79 - :any:`Completion`
79 - :any:`Completion`
80 - :any:`rectify_completions`
80 - :any:`rectify_completions`
81
81
82 .. note::
82 .. note::
83
83
84 better name for :any:`rectify_completions` ?
84 better name for :any:`rectify_completions` ?
85
85
86 We welcome any feedback on these new APIs, and we also encourage you to try this
87 module in debug mode (start IPython with ``--Completer.debug=True``) in order
88 to have extra logging information if :any:`jedi` is crashing, or if the current
89 IPython completer's pending deprecations are returning results not yet handled
90 by :any:`jedi`.
91
91
92 Using Jedi for tab completion allows snippets like the following to work without
93 having to execute any code:
94
94
95 >>> myvar = ['hello', 42]
95 >>> myvar = ['hello', 42]
96 ... myvar[1].bi<tab>
96 ... myvar[1].bi<tab>
97
97
98 Tab completion will be able to infer that ``myvar[1]`` is a real number without
99 executing almost any code, unlike the deprecated :any:`IPCompleter.greedy`
100 option.
101
101
102 Be sure to update :any:`jedi` to the latest stable version or to try the
102 Be sure to update :any:`jedi` to the latest stable version or to try the
103 current development version to get better completions.
103 current development version to get better completions.
104
104
105 Matchers
105 Matchers
106 ========
106 ========
107
107
108 All completion routines are implemented using the unified *Matchers* API.
109 The matchers API is provisional and subject to change without notice.
110
110
111 The built-in matchers include:
111 The built-in matchers include:
112
112
113 - :any:`IPCompleter.dict_key_matcher`: dictionary key completions,
113 - :any:`IPCompleter.dict_key_matcher`: dictionary key completions,
114 - :any:`IPCompleter.magic_matcher`: completions for magics,
114 - :any:`IPCompleter.magic_matcher`: completions for magics,
115 - :any:`IPCompleter.unicode_name_matcher`,
115 - :any:`IPCompleter.unicode_name_matcher`,
116 :any:`IPCompleter.fwd_unicode_matcher`
116 :any:`IPCompleter.fwd_unicode_matcher`
117 and :any:`IPCompleter.latex_name_matcher`: see `Forward latex/unicode completion`_,
117 and :any:`IPCompleter.latex_name_matcher`: see `Forward latex/unicode completion`_,
118 - :any:`back_unicode_name_matcher` and :any:`back_latex_name_matcher`: see `Backward latex completion`_,
118 - :any:`back_unicode_name_matcher` and :any:`back_latex_name_matcher`: see `Backward latex completion`_,
119 - :any:`IPCompleter.file_matcher`: paths to files and directories,
119 - :any:`IPCompleter.file_matcher`: paths to files and directories,
120 - :any:`IPCompleter.python_func_kw_matcher` - function keywords,
120 - :any:`IPCompleter.python_func_kw_matcher` - function keywords,
121 - :any:`IPCompleter.python_matches` - globals and attributes (v1 API),
121 - :any:`IPCompleter.python_matches` - globals and attributes (v1 API),
122 - ``IPCompleter.jedi_matcher`` - static analysis with Jedi,
122 - ``IPCompleter.jedi_matcher`` - static analysis with Jedi,
123 - :any:`IPCompleter.custom_completer_matcher` - pluggable completer with a default
123 - :any:`IPCompleter.custom_completer_matcher` - pluggable completer with a default
124 implementation in :any:`InteractiveShell` which uses IPython hooks system
124 implementation in :any:`InteractiveShell` which uses IPython hooks system
125 (`complete_command`) with string dispatch (including regular expressions).
125 (`complete_command`) with string dispatch (including regular expressions).
126 Unlike other matchers, ``custom_completer_matcher`` will not suppress
127 Jedi results, in order to match behaviour in earlier IPython versions.
128
128
129 Custom matchers can be added by appending to the ``IPCompleter.custom_matchers`` list.
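
For illustration, a minimal sketch of registering a v1-style custom matcher from an
interactive session (the matcher name and candidate strings below are made up, not
part of IPython):

.. code-block:: python

    from IPython.core.completer import completion_matcher

    @completion_matcher(identifier="color_matcher", priority=50)
    def color_matcher(text: str) -> list[str]:
        # API v1: receive the token being completed, return candidate strings.
        colors = ["red", "green", "blue"]
        return [color for color in colors if color.startswith(text)]

    # Assumes a running IPython session where ``get_ipython()`` is available.
    get_ipython().Completer.custom_matchers.append(color_matcher)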
130
130
131 Matcher API
131 Matcher API
132 -----------
132 -----------
133
133
134 Simplifying some details, the ``Matcher`` interface can be described as
135
135
136 .. code-block::
136 .. code-block::
137
137
138 MatcherAPIv1 = Callable[[str], list[str]]
138 MatcherAPIv1 = Callable[[str], list[str]]
139 MatcherAPIv2 = Callable[[CompletionContext], SimpleMatcherResult]
139 MatcherAPIv2 = Callable[[CompletionContext], SimpleMatcherResult]
140
140
141 Matcher = MatcherAPIv1 | MatcherAPIv2
141 Matcher = MatcherAPIv1 | MatcherAPIv2
142
142
143 The ``MatcherAPIv1`` reflects the matcher API as available prior to IPython 8.6.0
144 and remains supported as the simplest way of generating completions. This is also
145 currently the only API supported by the IPython hooks system `complete_command`.
146
146
147 To distinguish between matcher versions, the ``matcher_api_version`` attribute is used.
148 More precisely, the API allows ``matcher_api_version`` to be omitted for v1 Matchers,
149 and requires a literal ``2`` for v2 Matchers.
150 
151 Once the API stabilises, future versions may relax the requirement of specifying
152 ``matcher_api_version`` by switching to :any:`functools.singledispatch`; therefore,
153 please do not rely on the presence of ``matcher_api_version`` for any purpose.
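
As a hedged sketch (not part of the IPython source), a v2 matcher could look like the
following; the ``context_matcher`` decorator sets ``matcher_api_version`` to ``2``:

.. code-block:: python

    from IPython.core.completer import (
        CompletionContext,
        SimpleCompletion,
        SimpleMatcherResult,
        context_matcher,
    )

    @context_matcher()
    def unit_matcher(context: CompletionContext) -> SimpleMatcherResult:
        # API v2: receive the full CompletionContext, return a TypedDict result.
        units = ["meters", "seconds", "kilograms"]
        matches = [u for u in units if u.startswith(context.token)]
        return SimpleMatcherResult(
            completions=[SimpleCompletion(text=u, type="unit") for u in matches],
        )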
154
154
155 Suppression of competing matchers
155 Suppression of competing matchers
156 ---------------------------------
156 ---------------------------------
157
157
158 By default results from all matchers are combined, in the order determined by
158 By default results from all matchers are combined, in the order determined by
159 their priority. Matchers can request to suppress results from subsequent
159 their priority. Matchers can request to suppress results from subsequent
160 matchers by setting ``suppress`` to ``True`` in the ``MatcherResult``.
160 matchers by setting ``suppress`` to ``True`` in the ``MatcherResult``.
161
161
162 When multiple matchers simultaneously request suppression, the results from
163 the matcher with the highest priority will be returned.
164
164
165 Sometimes it is desirable to suppress most but not all other matchers;
166 this can be achieved by adding a set of identifiers of matchers which
167 should not be suppressed to the ``MatcherResult`` under the ``do_not_suppress`` key.
168
168
169 The suppression behaviour is user-configurable via
170 :std:configtrait:`IPCompleter.suppress_competing_matchers`.
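
A hedged sketch of a result using these keys (the matcher identifier shown is the
default ``__qualname__``-derived one and is only illustrative):

.. code-block:: python

    SimpleMatcherResult(
        completions=[SimpleCompletion(text="%%timeit", type="magic")],
        suppress=True,
        do_not_suppress={"IPCompleter.magic_matcher"},
    )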
171 """
171 """
172
172
173
173
174 # Copyright (c) IPython Development Team.
174 # Copyright (c) IPython Development Team.
175 # Distributed under the terms of the Modified BSD License.
175 # Distributed under the terms of the Modified BSD License.
176 #
176 #
177 # Some of this code originated from rlcompleter in the Python standard library
177 # Some of this code originated from rlcompleter in the Python standard library
178 # Copyright (C) 2001 Python Software Foundation, www.python.org
178 # Copyright (C) 2001 Python Software Foundation, www.python.org
179
179
180 from __future__ import annotations
180 from __future__ import annotations
181 import builtins as builtin_mod
181 import builtins as builtin_mod
182 import enum
182 import enum
183 import glob
183 import glob
184 import inspect
184 import inspect
185 import itertools
185 import itertools
186 import keyword
186 import keyword
187 import os
187 import os
188 import re
188 import re
189 import string
189 import string
190 import sys
190 import sys
191 import tokenize
191 import tokenize
192 import time
192 import time
193 import unicodedata
193 import unicodedata
194 import uuid
194 import uuid
195 import warnings
195 import warnings
196 from ast import literal_eval
196 from ast import literal_eval
197 from collections import defaultdict
197 from collections import defaultdict
198 from contextlib import contextmanager
198 from contextlib import contextmanager
199 from dataclasses import dataclass
199 from dataclasses import dataclass
200 from functools import cached_property, partial
200 from functools import cached_property, partial
201 from types import SimpleNamespace
201 from types import SimpleNamespace
202 from typing import (
202 from typing import (
203 Iterable,
203 Iterable,
204 Iterator,
204 Iterator,
205 List,
205 List,
206 Tuple,
206 Tuple,
207 Union,
207 Union,
208 Any,
208 Any,
209 Sequence,
209 Sequence,
210 Dict,
210 Dict,
211 Optional,
211 Optional,
212 TYPE_CHECKING,
212 TYPE_CHECKING,
213 Set,
213 Set,
214 Sized,
214 Sized,
215 TypeVar,
215 TypeVar,
216 Literal,
216 Literal,
217 )
217 )
218
218
219 from IPython.core.guarded_eval import guarded_eval, EvaluationContext
219 from IPython.core.guarded_eval import guarded_eval, EvaluationContext
220 from IPython.core.error import TryNext
220 from IPython.core.error import TryNext
221 from IPython.core.inputtransformer2 import ESC_MAGIC
221 from IPython.core.inputtransformer2 import ESC_MAGIC
222 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
222 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
223 from IPython.core.oinspect import InspectColors
223 from IPython.core.oinspect import InspectColors
224 from IPython.testing.skipdoctest import skip_doctest
224 from IPython.testing.skipdoctest import skip_doctest
225 from IPython.utils import generics
225 from IPython.utils import generics
226 from IPython.utils.decorators import sphinx_options
226 from IPython.utils.decorators import sphinx_options
227 from IPython.utils.dir2 import dir2, get_real_method
227 from IPython.utils.dir2 import dir2, get_real_method
228 from IPython.utils.docs import GENERATING_DOCUMENTATION
228 from IPython.utils.docs import GENERATING_DOCUMENTATION
229 from IPython.utils.path import ensure_dir_exists
229 from IPython.utils.path import ensure_dir_exists
230 from IPython.utils.process import arg_split
230 from IPython.utils.process import arg_split
231 from traitlets import (
231 from traitlets import (
232 Bool,
232 Bool,
233 Enum,
233 Enum,
234 Int,
234 Int,
235 List as ListTrait,
235 List as ListTrait,
236 Unicode,
236 Unicode,
237 Dict as DictTrait,
237 Dict as DictTrait,
238 Union as UnionTrait,
238 Union as UnionTrait,
239 observe,
239 observe,
240 )
240 )
241 from traitlets.config.configurable import Configurable
241 from traitlets.config.configurable import Configurable
242
242
243 import __main__
243 import __main__
244
244
245 from typing import cast
245 from typing import cast
246 from typing_extensions import TypedDict, NotRequired, Protocol, TypeAlias, TypeGuard
246
247 if sys.version_info < (3, 12):
248 from typing_extensions import TypedDict, NotRequired, Protocol, TypeAlias, TypeGuard
249 else:
250 from typing import TypedDict, NotRequired, Protocol, TypeAlias, TypeGuard
247
251
248
252
253 # skip module doctests
250 __skip_doctest__ = True
254 __skip_doctest__ = True
251
255
252
256
253 try:
257 try:
254 import jedi
258 import jedi
255 jedi.settings.case_insensitive_completion = False
259 jedi.settings.case_insensitive_completion = False
256 import jedi.api.helpers
260 import jedi.api.helpers
257 import jedi.api.classes
261 import jedi.api.classes
258 JEDI_INSTALLED = True
262 JEDI_INSTALLED = True
259 except ImportError:
263 except ImportError:
260 JEDI_INSTALLED = False
264 JEDI_INSTALLED = False
261
265
262
266
263 if GENERATING_DOCUMENTATION:
264 from typing import TypedDict
265
267
266 # -----------------------------------------------------------------------------
268 # -----------------------------------------------------------------------------
267 # Globals
269 # Globals
268 #-----------------------------------------------------------------------------
270 #-----------------------------------------------------------------------------
269
271
272 # Ranges where we have most of the valid unicode names. We could be finer
273 # grained, but is it worth it for performance? While unicode has characters in the
274 # range 0-0x110000, we seem to have names for only about 10% of those (131808 as I
275 # write this). With the ranges below we cover them all, with a density of ~67%;
276 # the biggest next gap we consider would only add about 1% density, and there are 600
277 # gaps that would need hard-coding.
276 _UNICODE_RANGES = [(32, 0x323B0), (0xE0001, 0xE01F0)]
278 _UNICODE_RANGES = [(32, 0x323B0), (0xE0001, 0xE01F0)]
277
279
278 # Public API
280 # Public API
279 __all__ = ["Completer", "IPCompleter"]
281 __all__ = ["Completer", "IPCompleter"]
280
282
281 if sys.platform == 'win32':
283 if sys.platform == 'win32':
282 PROTECTABLES = ' '
284 PROTECTABLES = ' '
283 else:
285 else:
284 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
286 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
285
287
286 # Protect against returning an enormous number of completions which the frontend
288 # Protect against returning an enormous number of completions which the frontend
287 # may have trouble processing.
289 # may have trouble processing.
288 MATCHES_LIMIT = 500
290 MATCHES_LIMIT = 500
289
291
290 # Completion type reported when no type can be inferred.
292 # Completion type reported when no type can be inferred.
291 _UNKNOWN_TYPE = "<unknown>"
293 _UNKNOWN_TYPE = "<unknown>"
292
294
293 # sentinel value to signal lack of a match
295 # sentinel value to signal lack of a match
294 not_found = object()
296 not_found = object()
295
297
296 class ProvisionalCompleterWarning(FutureWarning):
298 class ProvisionalCompleterWarning(FutureWarning):
297 """
299 """
300 Exception raised by an experimental feature in this module.
301 
302 Wrap code in the :any:`provisionalcompleter` context manager if you
303 are certain you want to use an unstable feature.
302 """
304 """
303 pass
305 pass
304
306
305 warnings.filterwarnings('error', category=ProvisionalCompleterWarning)
307 warnings.filterwarnings('error', category=ProvisionalCompleterWarning)
306
308
307
309
308 @skip_doctest
310 @skip_doctest
309 @contextmanager
311 @contextmanager
310 def provisionalcompleter(action='ignore'):
312 def provisionalcompleter(action='ignore'):
311 """
313 """
312 This context manager has to be used in any place where unstable completer
314 This context manager has to be used in any place where unstable completer
313 behavior and API may be called.
315 behavior and API may be called.
314
316
315 >>> with provisionalcompleter():
317 >>> with provisionalcompleter():
316 ... completer.do_experimental_things() # works
318 ... completer.do_experimental_things() # works
317
319
318 >>> completer.do_experimental_things() # raises.
320 >>> completer.do_experimental_things() # raises.
319
321
320 .. note::
322 .. note::
321
323
322 Unstable
324 Unstable
323
325
324 By using this context manager you agree that the API in use may change
326 By using this context manager you agree that the API in use may change
325 without warning, and that you won't complain if they do so.
327 without warning, and that you won't complain if they do so.
326
328
327 You also understand that, if the API is not to your liking, you should report
329 You also understand that, if the API is not to your liking, you should report
328 a bug to explain your use case upstream.
330 a bug to explain your use case upstream.
329
331
330 We'll be happy to get your feedback, feature requests, and improvements on
332 We'll be happy to get your feedback, feature requests, and improvements on
331 any of the unstable APIs!
333 any of the unstable APIs!
332 """
334 """
333 with warnings.catch_warnings():
335 with warnings.catch_warnings():
334 warnings.filterwarnings(action, category=ProvisionalCompleterWarning)
336 warnings.filterwarnings(action, category=ProvisionalCompleterWarning)
335 yield
337 yield
336
338
337
339
338 def has_open_quotes(s):
340 def has_open_quotes(s):
339 """Return whether a string has open quotes.
341 """Return whether a string has open quotes.
340
342
341 This simply counts whether the number of quote characters of either type in
343 This simply counts whether the number of quote characters of either type in
342 the string is odd.
344 the string is odd.
343
345
344 Returns
346 Returns
345 -------
347 -------
346 If there is an open quote, the quote character is returned. Else, return
348 If there is an open quote, the quote character is returned. Else, return
347 False.
349 False.
348 """
350 """
349 # We check " first, then ', so complex cases with nested quotes will get
351 # We check " first, then ', so complex cases with nested quotes will get
350 # the " to take precedence.
352 # the " to take precedence.
351 if s.count('"') % 2:
353 if s.count('"') % 2:
352 return '"'
354 return '"'
353 elif s.count("'") % 2:
355 elif s.count("'") % 2:
354 return "'"
356 return "'"
355 else:
357 else:
356 return False
358 return False
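# A hedged usage sketch of the helper above (illustrative values):
#
#     >>> has_open_quotes("print('hello")
#     "'"
#     >>> has_open_quotes("print('hello')")
#     False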
357
359
358
360
359 def protect_filename(s, protectables=PROTECTABLES):
361 def protect_filename(s, protectables=PROTECTABLES):
360 """Escape a string to protect certain characters."""
362 """Escape a string to protect certain characters."""
361 if set(s) & set(protectables):
363 if set(s) & set(protectables):
362 if sys.platform == "win32":
364 if sys.platform == "win32":
363 return '"' + s + '"'
365 return '"' + s + '"'
364 else:
366 else:
365 return "".join(("\\" + c if c in protectables else c) for c in s)
367 return "".join(("\\" + c if c in protectables else c) for c in s)
366 else:
368 else:
367 return s
369 return s
368
370
369
371
370 def expand_user(path:str) -> Tuple[str, bool, str]:
372 def expand_user(path:str) -> Tuple[str, bool, str]:
371 """Expand ``~``-style usernames in strings.
373 """Expand ``~``-style usernames in strings.
372
374
373 This is similar to :func:`os.path.expanduser`, but it computes and returns
375 This is similar to :func:`os.path.expanduser`, but it computes and returns
374 extra information that will be useful if the input was being used in
376 extra information that will be useful if the input was being used in
375 computing completions, and you wish to return the completions with the
377 computing completions, and you wish to return the completions with the
376 original '~' instead of its expanded value.
378 original '~' instead of its expanded value.
377
379
378 Parameters
380 Parameters
379 ----------
381 ----------
380 path : str
382 path : str
381 String to be expanded. If no ~ is present, the output is the same as the
383 String to be expanded. If no ~ is present, the output is the same as the
382 input.
384 input.
383
385
384 Returns
386 Returns
385 -------
387 -------
386 newpath : str
388 newpath : str
387 Result of ~ expansion in the input path.
389 Result of ~ expansion in the input path.
388 tilde_expand : bool
390 tilde_expand : bool
389 Whether any expansion was performed or not.
391 Whether any expansion was performed or not.
390 tilde_val : str
392 tilde_val : str
391 The value that ~ was replaced with.
393 The value that ~ was replaced with.
392 """
394 """
393 # Default values
395 # Default values
394 tilde_expand = False
396 tilde_expand = False
395 tilde_val = ''
397 tilde_val = ''
396 newpath = path
398 newpath = path
397
399
398 if path.startswith('~'):
400 if path.startswith('~'):
399 tilde_expand = True
401 tilde_expand = True
400 rest = len(path)-1
402 rest = len(path)-1
401 newpath = os.path.expanduser(path)
403 newpath = os.path.expanduser(path)
402 if rest:
404 if rest:
403 tilde_val = newpath[:-rest]
405 tilde_val = newpath[:-rest]
404 else:
406 else:
405 tilde_val = newpath
407 tilde_val = newpath
406
408
407 return newpath, tilde_expand, tilde_val
409 return newpath, tilde_expand, tilde_val
408
410
409
411
410 def compress_user(path:str, tilde_expand:bool, tilde_val:str) -> str:
412 def compress_user(path:str, tilde_expand:bool, tilde_val:str) -> str:
411 """Does the opposite of expand_user, with its outputs.
413 """Does the opposite of expand_user, with its outputs.
412 """
414 """
413 if tilde_expand:
415 if tilde_expand:
414 return path.replace(tilde_val, '~')
416 return path.replace(tilde_val, '~')
415 else:
417 else:
416 return path
418 return path
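# A hedged usage sketch of the two helpers above (the home directory shown is
# illustrative):
#
#     >>> expand_user("~/notebooks")
#     ('/home/user/notebooks', True, '/home/user')
#     >>> compress_user('/home/user/notebooks', True, '/home/user')
#     '~/notebooks'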
417
419
418
420
419 def completions_sorting_key(word):
421 def completions_sorting_key(word):
420 """key for sorting completions
422 """key for sorting completions
421
423
422 This does several things:
424 This does several things:
423
425
424 - Demote any completions starting with underscores to the end
426 - Demote any completions starting with underscores to the end
427 - Insert any %magic and %%cellmagic completions in alphabetical order
428 by their name
427 """
429 """
428 prio1, prio2 = 0, 0
430 prio1, prio2 = 0, 0
429
431
430 if word.startswith('__'):
432 if word.startswith('__'):
431 prio1 = 2
433 prio1 = 2
432 elif word.startswith('_'):
434 elif word.startswith('_'):
433 prio1 = 1
435 prio1 = 1
434
436
435 if word.endswith('='):
437 if word.endswith('='):
436 prio1 = -1
438 prio1 = -1
437
439
438 if word.startswith('%%'):
440 if word.startswith('%%'):
439 # If there's another % in there, this is something else, so leave it alone
441 # If there's another % in there, this is something else, so leave it alone
440 if not "%" in word[2:]:
442 if not "%" in word[2:]:
441 word = word[2:]
443 word = word[2:]
442 prio2 = 2
444 prio2 = 2
443 elif word.startswith('%'):
445 elif word.startswith('%'):
444 if not "%" in word[1:]:
446 if not "%" in word[1:]:
445 word = word[1:]
447 word = word[1:]
446 prio2 = 1
448 prio2 = 1
447
449
448 return prio1, word, prio2
450 return prio1, word, prio2
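# A hedged usage sketch: sorting with this key demotes private/dunder names and
# interleaves magics alphabetically by their bare name (illustrative values):
#
#     >>> sorted(["__str__", "abs", "%alias", "_private"], key=completions_sorting_key)
#     ['abs', '%alias', '_private', '__str__']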
449
451
450
452
451 class _FakeJediCompletion:
453 class _FakeJediCompletion:
452 """
454 """
455 This is a workaround to communicate to the UI that Jedi has crashed and to
456 report a bug. Will be used only if :any:`IPCompleter.debug` is set to true.
455
457
456 Added in IPython 6.0 so should likely be removed for 7.0
458 Added in IPython 6.0 so should likely be removed for 7.0
457
459
458 """
460 """
459
461
460 def __init__(self, name):
462 def __init__(self, name):
461
463
462 self.name = name
464 self.name = name
463 self.complete = name
465 self.complete = name
464 self.type = 'crashed'
466 self.type = 'crashed'
465 self.name_with_symbols = name
467 self.name_with_symbols = name
466 self.signature = ""
468 self.signature = ""
467 self._origin = "fake"
469 self._origin = "fake"
468 self.text = "crashed"
470 self.text = "crashed"
469
471
470 def __repr__(self):
472 def __repr__(self):
471 return '<Fake completion object jedi has crashed>'
473 return '<Fake completion object jedi has crashed>'
472
474
473
475
474 _JediCompletionLike = Union["jedi.api.Completion", _FakeJediCompletion]
476 _JediCompletionLike = Union["jedi.api.Completion", _FakeJediCompletion]
475
477
476
478
477 class Completion:
479 class Completion:
478 """
480 """
479 Completion object used and returned by IPython completers.
481 Completion object used and returned by IPython completers.
480
482
481 .. warning::
483 .. warning::
482
484
483 Unstable
485 Unstable
484
486
487 This class is unstable; the API may change without warning.
488 It will also raise unless used in the proper context manager.
489 
490 This acts as a middle-ground :any:`Completion` object between the
491 :any:`jedi.api.classes.Completion` object and the Prompt Toolkit completion
492 object. While Jedi needs a lot of information about the evaluator and how the
493 code should be run/inspected, Prompt Toolkit (and other frontends) mostly
494 need user-facing information:
495 
496 - Which range should be replaced by what.
497 - Some metadata (like the completion type), or meta information to be displayed to
498 the user.
499 
500 For debugging purposes we can also store the origin of the completion (``jedi``,
501 ``IPython.python_matches``, ``IPython.magics_matches``...).
500 """
502 """
501
503
502 __slots__ = ['start', 'end', 'text', 'type', 'signature', '_origin']
504 __slots__ = ['start', 'end', 'text', 'type', 'signature', '_origin']
503
505
504 def __init__(
506 def __init__(
505 self,
507 self,
506 start: int,
508 start: int,
507 end: int,
509 end: int,
508 text: str,
510 text: str,
509 *,
511 *,
510 type: Optional[str] = None,
512 type: Optional[str] = None,
511 _origin="",
513 _origin="",
512 signature="",
514 signature="",
513 ) -> None:
515 ) -> None:
514 warnings.warn(
516 warnings.warn(
515 "``Completion`` is a provisional API (as of IPython 6.0). "
517 "``Completion`` is a provisional API (as of IPython 6.0). "
516 "It may change without warnings. "
518 "It may change without warnings. "
517 "Use in corresponding context manager.",
519 "Use in corresponding context manager.",
518 category=ProvisionalCompleterWarning,
520 category=ProvisionalCompleterWarning,
519 stacklevel=2,
521 stacklevel=2,
520 )
522 )
521
523
522 self.start = start
524 self.start = start
523 self.end = end
525 self.end = end
524 self.text = text
526 self.text = text
525 self.type = type
527 self.type = type
526 self.signature = signature
528 self.signature = signature
527 self._origin = _origin
529 self._origin = _origin
528
530
529 def __repr__(self):
531 def __repr__(self):
530 return '<Completion start=%s end=%s text=%r type=%r, signature=%r,>' % \
532 return '<Completion start=%s end=%s text=%r type=%r, signature=%r,>' % \
531 (self.start, self.end, self.text, self.type or '?', self.signature or '?')
533 (self.start, self.end, self.text, self.type or '?', self.signature or '?')
532
534
533 def __eq__(self, other) -> bool:
535 def __eq__(self, other) -> bool:
534 """
536 """
537 Equality and hash do not take the type into account (as some completers may not
538 be able to infer the type), but are used to (partially) de-duplicate
539 completions.
540 
541 Completely de-duplicating completions is a bit trickier than just
542 comparing, as it depends on the surrounding text, which Completions are not
543 aware of.
542 """
544 """
543 return self.start == other.start and \
545 return self.start == other.start and \
544 self.end == other.end and \
546 self.end == other.end and \
545 self.text == other.text
547 self.text == other.text
546
548
547 def __hash__(self):
549 def __hash__(self):
548 return hash((self.start, self.end, self.text))
550 return hash((self.start, self.end, self.text))
549
551
550
552
551 class SimpleCompletion:
553 class SimpleCompletion:
552 """Completion item to be included in the dictionary returned by new-style Matcher (API v2).
554 """Completion item to be included in the dictionary returned by new-style Matcher (API v2).
553
555
554 .. warning::
556 .. warning::
555
557
556 Provisional
558 Provisional
557
559
558 This class is used to describe the currently supported attributes of
560 This class is used to describe the currently supported attributes of
559 simple completion items, and any additional implementation details
561 simple completion items, and any additional implementation details
560 should not be relied on. Additional attributes may be included in
562 should not be relied on. Additional attributes may be included in
563 future versions, and the meaning of text disambiguated from the current
564 dual meaning of "text to insert" and "text to use as a label".
563 """
565 """
564
566
565 __slots__ = ["text", "type"]
567 __slots__ = ["text", "type"]
566
568
567 def __init__(self, text: str, *, type: Optional[str] = None):
569 def __init__(self, text: str, *, type: Optional[str] = None):
568 self.text = text
570 self.text = text
569 self.type = type
571 self.type = type
570
572
571 def __repr__(self):
573 def __repr__(self):
572 return f"<SimpleCompletion text={self.text!r} type={self.type!r}>"
574 return f"<SimpleCompletion text={self.text!r} type={self.type!r}>"
573
575
574
576
575 class _MatcherResultBase(TypedDict):
577 class _MatcherResultBase(TypedDict):
576 """Definition of dictionary to be returned by new-style Matcher (API v2)."""
578 """Definition of dictionary to be returned by new-style Matcher (API v2)."""
577
579
578 #: Suffix of the provided ``CompletionContext.token``, if not given defaults to full token.
580 #: Suffix of the provided ``CompletionContext.token``, if not given defaults to full token.
579 matched_fragment: NotRequired[str]
581 matched_fragment: NotRequired[str]
580
582
581 #: Whether to suppress results from all other matchers (True), some
583 #: Whether to suppress results from all other matchers (True), some
582 #: matchers (set of identifiers) or none (False); default is False.
584 #: matchers (set of identifiers) or none (False); default is False.
583 suppress: NotRequired[Union[bool, Set[str]]]
585 suppress: NotRequired[Union[bool, Set[str]]]
584
586
585 #: Identifiers of matchers which should NOT be suppressed when this matcher
587 #: Identifiers of matchers which should NOT be suppressed when this matcher
586 #: requests to suppress all other matchers; defaults to an empty set.
588 #: requests to suppress all other matchers; defaults to an empty set.
587 do_not_suppress: NotRequired[Set[str]]
589 do_not_suppress: NotRequired[Set[str]]
588
590
589 #: Are completions already ordered and should be left as-is? default is False.
591 #: Are completions already ordered and should be left as-is? default is False.
590 ordered: NotRequired[bool]
592 ordered: NotRequired[bool]
591
593
592
594
593 @sphinx_options(show_inherited_members=True, exclude_inherited_from=["dict"])
595 @sphinx_options(show_inherited_members=True, exclude_inherited_from=["dict"])
594 class SimpleMatcherResult(_MatcherResultBase, TypedDict):
596 class SimpleMatcherResult(_MatcherResultBase, TypedDict):
595 """Result of new-style completion matcher."""
597 """Result of new-style completion matcher."""
596
598
597 # note: TypedDict is added again to the inheritance chain
599 # note: TypedDict is added again to the inheritance chain
598 # in order to get __orig_bases__ for documentation
600 # in order to get __orig_bases__ for documentation
599
601
600 #: List of candidate completions
602 #: List of candidate completions
601 completions: Sequence[SimpleCompletion] | Iterator[SimpleCompletion]
603 completions: Sequence[SimpleCompletion] | Iterator[SimpleCompletion]
602
604
603
605
604 class _JediMatcherResult(_MatcherResultBase):
606 class _JediMatcherResult(_MatcherResultBase):
605 """Matching result returned by Jedi (will be processed differently)"""
607 """Matching result returned by Jedi (will be processed differently)"""
606
608
607 #: list of candidate completions
609 #: list of candidate completions
608 completions: Iterator[_JediCompletionLike]
610 completions: Iterator[_JediCompletionLike]
609
611
610
612
611 AnyMatcherCompletion = Union[_JediCompletionLike, SimpleCompletion]
613 AnyMatcherCompletion = Union[_JediCompletionLike, SimpleCompletion]
612 AnyCompletion = TypeVar("AnyCompletion", AnyMatcherCompletion, Completion)
614 AnyCompletion = TypeVar("AnyCompletion", AnyMatcherCompletion, Completion)
613
615
614
616
615 @dataclass
617 @dataclass
616 class CompletionContext:
618 class CompletionContext:
617 """Completion context provided as an argument to matchers in the Matcher API v2."""
619 """Completion context provided as an argument to matchers in the Matcher API v2."""
618
620
619 # rationale: many legacy matchers relied on completer state (`self.text_until_cursor`)
621 # rationale: many legacy matchers relied on completer state (`self.text_until_cursor`)
620 # which was not explicitly visible as an argument of the matcher, making any refactor
622 # which was not explicitly visible as an argument of the matcher, making any refactor
621 # prone to errors; by explicitly passing `cursor_position` we can decouple the matchers
623 # prone to errors; by explicitly passing `cursor_position` we can decouple the matchers
622 # from the completer, and make substituting them in sub-classes easier.
624 # from the completer, and make substituting them in sub-classes easier.
623
625
624 #: Relevant fragment of code directly preceding the cursor.
626 #: Relevant fragment of code directly preceding the cursor.
625 #: The extraction of token is implemented via splitter heuristic
627 #: The extraction of token is implemented via splitter heuristic
626 #: (following readline behaviour for legacy reasons), which is user configurable
628 #: (following readline behaviour for legacy reasons), which is user configurable
627 #: (by switching the greedy mode).
629 #: (by switching the greedy mode).
628 token: str
630 token: str
629
631
630 #: The full available content of the editor or buffer
632 #: The full available content of the editor or buffer
631 full_text: str
633 full_text: str
632
634
633 #: Cursor position in the line (the same for ``full_text`` and ``text``).
635 #: Cursor position in the line (the same for ``full_text`` and ``text``).
634 cursor_position: int
636 cursor_position: int
635
637
636 #: Cursor line in ``full_text``.
638 #: Cursor line in ``full_text``.
637 cursor_line: int
639 cursor_line: int
638
640
639 #: The maximum number of completions that will be used downstream.
641 #: The maximum number of completions that will be used downstream.
640 #: Matchers can use this information to abort early.
642 #: Matchers can use this information to abort early.
641 #: The built-in Jedi matcher is currently excepted from this limit.
643 #: The built-in Jedi matcher is currently excepted from this limit.
642 # If not given, return all possible completions.
644 # If not given, return all possible completions.
643 limit: Optional[int]
645 limit: Optional[int]
644
646
645 @cached_property
647 @cached_property
646 def text_until_cursor(self) -> str:
648 def text_until_cursor(self) -> str:
647 return self.line_with_cursor[: self.cursor_position]
649 return self.line_with_cursor[: self.cursor_position]
648
650
649 @cached_property
651 @cached_property
650 def line_with_cursor(self) -> str:
652 def line_with_cursor(self) -> str:
651 return self.full_text.split("\n")[self.cursor_line]
653 return self.full_text.split("\n")[self.cursor_line]
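# A hedged sketch of building a CompletionContext by hand (normally IPCompleter
# constructs it); all values below are illustrative:
#
#     ctx = CompletionContext(
#         token="os.pa",
#         full_text="import os\nos.pa",
#         cursor_position=5,
#         cursor_line=1,
#         limit=None,
#     )
#     ctx.line_with_cursor   # "os.pa"
#     ctx.text_until_cursor  # "os.pa"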
652
654
653
655
654 #: Matcher results for API v2.
656 #: Matcher results for API v2.
655 MatcherResult = Union[SimpleMatcherResult, _JediMatcherResult]
657 MatcherResult = Union[SimpleMatcherResult, _JediMatcherResult]
656
658
657
659
658 class _MatcherAPIv1Base(Protocol):
660 class _MatcherAPIv1Base(Protocol):
659 def __call__(self, text: str) -> List[str]:
661 def __call__(self, text: str) -> List[str]:
660 """Call signature."""
662 """Call signature."""
661 ...
663 ...
662
664
663 #: Used to construct the default matcher identifier
665 #: Used to construct the default matcher identifier
664 __qualname__: str
666 __qualname__: str
665
667
666
668
667 class _MatcherAPIv1Total(_MatcherAPIv1Base, Protocol):
669 class _MatcherAPIv1Total(_MatcherAPIv1Base, Protocol):
668 #: API version
670 #: API version
669 matcher_api_version: Optional[Literal[1]]
671 matcher_api_version: Optional[Literal[1]]
670
672
671 def __call__(self, text: str) -> List[str]:
673 def __call__(self, text: str) -> List[str]:
672 """Call signature."""
674 """Call signature."""
673 ...
675 ...
674
676
675
677
676 #: Protocol describing Matcher API v1.
678 #: Protocol describing Matcher API v1.
677 MatcherAPIv1: TypeAlias = Union[_MatcherAPIv1Base, _MatcherAPIv1Total]
679 MatcherAPIv1: TypeAlias = Union[_MatcherAPIv1Base, _MatcherAPIv1Total]
678
680
679
681
680 class MatcherAPIv2(Protocol):
682 class MatcherAPIv2(Protocol):
681 """Protocol describing Matcher API v2."""
683 """Protocol describing Matcher API v2."""
682
684
683 #: API version
685 #: API version
684 matcher_api_version: Literal[2] = 2
686 matcher_api_version: Literal[2] = 2
685
687
686 def __call__(self, context: CompletionContext) -> MatcherResult:
688 def __call__(self, context: CompletionContext) -> MatcherResult:
687 """Call signature."""
689 """Call signature."""
688 ...
690 ...
689
691
690 #: Used to construct the default matcher identifier
692 #: Used to construct the default matcher identifier
691 __qualname__: str
693 __qualname__: str
692
694
693
695
694 Matcher: TypeAlias = Union[MatcherAPIv1, MatcherAPIv2]
696 Matcher: TypeAlias = Union[MatcherAPIv1, MatcherAPIv2]
695
697
696
698
697 def _is_matcher_v1(matcher: Matcher) -> TypeGuard[MatcherAPIv1]:
699 def _is_matcher_v1(matcher: Matcher) -> TypeGuard[MatcherAPIv1]:
698 api_version = _get_matcher_api_version(matcher)
700 api_version = _get_matcher_api_version(matcher)
699 return api_version == 1
701 return api_version == 1
700
702
701
703
702 def _is_matcher_v2(matcher: Matcher) -> TypeGuard[MatcherAPIv2]:
704 def _is_matcher_v2(matcher: Matcher) -> TypeGuard[MatcherAPIv2]:
703 api_version = _get_matcher_api_version(matcher)
705 api_version = _get_matcher_api_version(matcher)
704 return api_version == 2
706 return api_version == 2
705
707
706
708
707 def _is_sizable(value: Any) -> TypeGuard[Sized]:
709 def _is_sizable(value: Any) -> TypeGuard[Sized]:
708 """Determines whether objects is sizable"""
710 """Determines whether objects is sizable"""
709 return hasattr(value, "__len__")
711 return hasattr(value, "__len__")
710
712
711
713
712 def _is_iterator(value: Any) -> TypeGuard[Iterator]:
714 def _is_iterator(value: Any) -> TypeGuard[Iterator]:
713 """Determines whether objects is sizable"""
715 """Determines whether objects is sizable"""
714 return hasattr(value, "__next__")
716 return hasattr(value, "__next__")
715
717
716
718
717 def has_any_completions(result: MatcherResult) -> bool:
719 def has_any_completions(result: MatcherResult) -> bool:
718 """Check if any result includes any completions."""
720 """Check if any result includes any completions."""
719 completions = result["completions"]
721 completions = result["completions"]
720 if _is_sizable(completions):
722 if _is_sizable(completions):
721 return len(completions) != 0
723 return len(completions) != 0
722 if _is_iterator(completions):
724 if _is_iterator(completions):
723 try:
725 try:
724 old_iterator = completions
726 old_iterator = completions
725 first = next(old_iterator)
727 first = next(old_iterator)
726 result["completions"] = cast(
728 result["completions"] = cast(
727 Iterator[SimpleCompletion],
729 Iterator[SimpleCompletion],
728 itertools.chain([first], old_iterator),
730 itertools.chain([first], old_iterator),
729 )
731 )
730 return True
732 return True
731 except StopIteration:
733 except StopIteration:
732 return False
734 return False
733 raise ValueError(
735 raise ValueError(
734 "Completions returned by matcher need to be an Iterator or a Sizable"
736 "Completions returned by matcher need to be an Iterator or a Sizable"
735 )
737 )
736
738
737
739
738 def completion_matcher(
740 def completion_matcher(
739 *,
741 *,
740 priority: Optional[float] = None,
742 priority: Optional[float] = None,
741 identifier: Optional[str] = None,
743 identifier: Optional[str] = None,
742 api_version: int = 1,
744 api_version: int = 1,
743 ):
745 ):
744 """Adds attributes describing the matcher.
746 """Adds attributes describing the matcher.
745
747
746 Parameters
748 Parameters
747 ----------
749 ----------
748 priority : Optional[float]
750 priority : Optional[float]
749 The priority of the matcher, determines the order of execution of matchers.
751 The priority of the matcher, determines the order of execution of matchers.
750 Higher priority means that the matcher will be executed first. Defaults to 0.
752 Higher priority means that the matcher will be executed first. Defaults to 0.
751 identifier : Optional[str]
753 identifier : Optional[str]
754 identifier of the matcher, allowing users to modify the behaviour via traitlets,
755 and also used for debugging (will be passed as ``origin`` with the completions).
756 
757 Defaults to the matcher function's ``__qualname__`` (for example,
758 ``IPCompleter.file_matcher`` for the built-in matcher defined
759 as a ``file_matcher`` method of the ``IPCompleter`` class).
758 api_version: Optional[int]
760 api_version: Optional[int]
759 version of the Matcher API used by this matcher.
761 version of the Matcher API used by this matcher.
760 Currently supported values are 1 and 2.
762 Currently supported values are 1 and 2.
761 Defaults to 1.
763 Defaults to 1.
762 """
764 """
763
765
764 def wrapper(func: Matcher):
766 def wrapper(func: Matcher):
765 func.matcher_priority = priority or 0 # type: ignore
767 func.matcher_priority = priority or 0 # type: ignore
766 func.matcher_identifier = identifier or func.__qualname__ # type: ignore
768 func.matcher_identifier = identifier or func.__qualname__ # type: ignore
767 func.matcher_api_version = api_version # type: ignore
769 func.matcher_api_version = api_version # type: ignore
768 if TYPE_CHECKING:
770 if TYPE_CHECKING:
769 if api_version == 1:
771 if api_version == 1:
770 func = cast(MatcherAPIv1, func)
772 func = cast(MatcherAPIv1, func)
771 elif api_version == 2:
773 elif api_version == 2:
772 func = cast(MatcherAPIv2, func)
774 func = cast(MatcherAPIv2, func)
773 return func
775 return func
774
776
775 return wrapper
777 return wrapper
776
778
777
779
778 def _get_matcher_priority(matcher: Matcher):
780 def _get_matcher_priority(matcher: Matcher):
779 return getattr(matcher, "matcher_priority", 0)
781 return getattr(matcher, "matcher_priority", 0)
780
782
781
783
782 def _get_matcher_id(matcher: Matcher):
784 def _get_matcher_id(matcher: Matcher):
783 return getattr(matcher, "matcher_identifier", matcher.__qualname__)
785 return getattr(matcher, "matcher_identifier", matcher.__qualname__)
784
786
785
787
786 def _get_matcher_api_version(matcher):
788 def _get_matcher_api_version(matcher):
787 return getattr(matcher, "matcher_api_version", 1)
789 return getattr(matcher, "matcher_api_version", 1)
788
790
789
791
790 context_matcher = partial(completion_matcher, api_version=2)
792 context_matcher = partial(completion_matcher, api_version=2)
791
793
792
794
793 _IC = Iterable[Completion]
795 _IC = Iterable[Completion]
794
796
795
797
796 def _deduplicate_completions(text: str, completions: _IC)-> _IC:
798 def _deduplicate_completions(text: str, completions: _IC)-> _IC:
797 """
799 """
798 Deduplicate a set of completions.
800 Deduplicate a set of completions.
799
801
800 .. warning::
802 .. warning::
801
803
802 Unstable
804 Unstable
803
805
804 This function is unstable, API may change without warning.
806 This function is unstable, API may change without warning.
805
807
806 Parameters
808 Parameters
807 ----------
809 ----------
808 text : str
810 text : str
809 text that should be completed.
811 text that should be completed.
810 completions : Iterator[Completion]
812 completions : Iterator[Completion]
811 iterator over the completions to deduplicate
813 iterator over the completions to deduplicate
812
814
813 Yields
815 Yields
814 ------
816 ------
815 `Completions` objects
817 `Completions` objects
818 Completions coming from multiple sources may be different but end up having
819 the same effect when applied to ``text``. If this is the case, this will
820 consider the completions as equal and only emit the first one encountered.
821 Not folded into `completions()` yet, for debugging purposes and to detect when
822 the IPython completer returns things that Jedi does not, but it should be
823 at some point.
822 """
824 """
823 completions = list(completions)
825 completions = list(completions)
824 if not completions:
826 if not completions:
825 return
827 return
826
828
827 new_start = min(c.start for c in completions)
829 new_start = min(c.start for c in completions)
828 new_end = max(c.end for c in completions)
830 new_end = max(c.end for c in completions)
829
831
830 seen = set()
832 seen = set()
831 for c in completions:
833 for c in completions:
832 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
834 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
833 if new_text not in seen:
835 if new_text not in seen:
834 yield c
836 yield c
835 seen.add(new_text)
837 seen.add(new_text)
836
838
837
839
838 def rectify_completions(text: str, completions: _IC, *, _debug: bool = False) -> _IC:
840 def rectify_completions(text: str, completions: _IC, *, _debug: bool = False) -> _IC:
839 """
841 """
840 Rectify a set of completions to all have the same ``start`` and ``end``
842 Rectify a set of completions to all have the same ``start`` and ``end``
841
843
842 .. warning::
844 .. warning::
843
845
844 Unstable
846 Unstable
845
847
846 This function is unstable, API may change without warning.
848 This function is unstable, API may change without warning.
849 It will also raise unless used in the proper context manager.
848
850
849 Parameters
851 Parameters
850 ----------
852 ----------
851 text : str
853 text : str
852 text that should be completed.
854 text that should be completed.
853 completions : Iterator[Completion]
855 completions : Iterator[Completion]
854 iterator over the completions to rectify
856 iterator over the completions to rectify
855 _debug : bool
857 _debug : bool
856 Log failed completion
858 Log failed completion
857
859
858 Notes
860 Notes
859 -----
861 -----
860 :any:`jedi.api.classes.Completion` s returned by Jedi may not have the same start and end, though
862 :any:`jedi.api.classes.Completion` s returned by Jedi may not have the same start and end, though
861 the Jupyter Protocol requires them to behave like so. This will readjust
863 the Jupyter Protocol requires them to behave like so. This will readjust
862 the completion to have the same ``start`` and ``end`` by padding both
864 the completion to have the same ``start`` and ``end`` by padding both
863 extremities with surrounding text.
865 extremities with surrounding text.
864
866
867 During stabilisation this should support a ``_debug`` option to log which
868 completions are returned by the IPython completer and not found in Jedi, in
869 order to make upstream bug reports.
868 """
870 """
869 warnings.warn("`rectify_completions` is a provisional API (as of IPython 6.0). "
871 warnings.warn("`rectify_completions` is a provisional API (as of IPython 6.0). "
870 "It may change without warnings. "
872 "It may change without warnings. "
871 "Use in corresponding context manager.",
873 "Use in corresponding context manager.",
872 category=ProvisionalCompleterWarning, stacklevel=2)
874 category=ProvisionalCompleterWarning, stacklevel=2)
873
875
874 completions = list(completions)
876 completions = list(completions)
875 if not completions:
877 if not completions:
876 return
878 return
877 starts = (c.start for c in completions)
879 starts = (c.start for c in completions)
878 ends = (c.end for c in completions)
880 ends = (c.end for c in completions)
879
881
880 new_start = min(starts)
882 new_start = min(starts)
881 new_end = max(ends)
883 new_end = max(ends)
882
884
883 seen_jedi = set()
885 seen_jedi = set()
884 seen_python_matches = set()
886 seen_python_matches = set()
885 for c in completions:
887 for c in completions:
886 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
888 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
887 if c._origin == 'jedi':
889 if c._origin == 'jedi':
888 seen_jedi.add(new_text)
890 seen_jedi.add(new_text)
889 elif c._origin == "IPCompleter.python_matcher":
891 elif c._origin == "IPCompleter.python_matcher":
890 seen_python_matches.add(new_text)
892 seen_python_matches.add(new_text)
891 yield Completion(new_start, new_end, new_text, type=c.type, _origin=c._origin, signature=c.signature)
893 yield Completion(new_start, new_end, new_text, type=c.type, _origin=c._origin, signature=c.signature)
892 diff = seen_python_matches.difference(seen_jedi)
894 diff = seen_python_matches.difference(seen_jedi)
893 if diff and _debug:
895 if diff and _debug:
894 print('IPython.python matches have extras:', diff)
896 print('IPython.python matches have extras:', diff)
895
897
896
898
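# --- Editor's illustrative sketch (added for clarity; not in the original) ---
# Minimal example of how ``rectify_completions`` pads completions so that they
# all share one replacement range. It assumes the ``Completion`` class and the
# ``provisionalcompleter`` context manager defined earlier in this module.
def _example_rectify_completions():
    text = "p.ar"
    with provisionalcompleter():
        completions = [
            Completion(start=2, end=4, text="args"),    # would replace only "ar"
            Completion(start=0, end=4, text="p.args"),  # would replace everything
        ]
        rectified = list(rectify_completions(text, completions))
    # Each rectified completion now spans start=0, end=4 with text "p.args".
    return rectified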
897 if sys.platform == 'win32':
899 if sys.platform == 'win32':
898 DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
900 DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
899 else:
901 else:
900 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
902 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
901
903
902 GREEDY_DELIMS = ' =\r\n'
904 GREEDY_DELIMS = ' =\r\n'
903
905
904
906
905 class CompletionSplitter(object):
907 class CompletionSplitter(object):
906 """An object to split an input line in a manner similar to readline.
908 """An object to split an input line in a manner similar to readline.
907
909
908 By having our own implementation, we can expose readline-like completion in
910 By having our own implementation, we can expose readline-like completion in
909 a uniform manner to all frontends. This object only needs to be given the
911 a uniform manner to all frontends. This object only needs to be given the
910 line of text to be split and the cursor position on said line, and it
912 line of text to be split and the cursor position on said line, and it
911 returns the 'word' to be completed on at the cursor after splitting the
913 returns the 'word' to be completed on at the cursor after splitting the
912 entire line.
914 entire line.
913
915
914 What characters are used as splitting delimiters can be controlled by
916 What characters are used as splitting delimiters can be controlled by
915 setting the ``delims`` attribute (this is a property that internally
917 setting the ``delims`` attribute (this is a property that internally
916 automatically builds the necessary regular expression)"""
918 automatically builds the necessary regular expression)"""
917
919
918 # Private interface
920 # Private interface
919
921
920 # A string of delimiter characters. The default value makes sense for
922 # A string of delimiter characters. The default value makes sense for
921 # IPython's most typical usage patterns.
923 # IPython's most typical usage patterns.
922 _delims = DELIMS
924 _delims = DELIMS
923
925
924 # The expression (a normal string) to be compiled into a regular expression
926 # The expression (a normal string) to be compiled into a regular expression
925 # for actual splitting. We store it as an attribute mostly for ease of
927 # for actual splitting. We store it as an attribute mostly for ease of
926 # debugging, since this type of code can be so tricky to debug.
928 # debugging, since this type of code can be so tricky to debug.
927 _delim_expr = None
929 _delim_expr = None
928
930
929 # The regular expression that does the actual splitting
931 # The regular expression that does the actual splitting
930 _delim_re = None
932 _delim_re = None
931
933
932 def __init__(self, delims=None):
934 def __init__(self, delims=None):
933 delims = CompletionSplitter._delims if delims is None else delims
935 delims = CompletionSplitter._delims if delims is None else delims
934 self.delims = delims
936 self.delims = delims
935
937
936 @property
938 @property
937 def delims(self):
939 def delims(self):
938 """Return the string of delimiter characters."""
940 """Return the string of delimiter characters."""
939 return self._delims
941 return self._delims
940
942
941 @delims.setter
943 @delims.setter
942 def delims(self, delims):
944 def delims(self, delims):
943 """Set the delimiters for line splitting."""
945 """Set the delimiters for line splitting."""
944 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
946 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
945 self._delim_re = re.compile(expr)
947 self._delim_re = re.compile(expr)
946 self._delims = delims
948 self._delims = delims
947 self._delim_expr = expr
949 self._delim_expr = expr
948
950
949 def split_line(self, line, cursor_pos=None):
951 def split_line(self, line, cursor_pos=None):
950 """Split a line of text with a cursor at the given position.
952 """Split a line of text with a cursor at the given position.
951 """
953 """
952 l = line if cursor_pos is None else line[:cursor_pos]
954 l = line if cursor_pos is None else line[:cursor_pos]
953 return self._delim_re.split(l)[-1]
955 return self._delim_re.split(l)[-1]
954
956
955
957
956
958
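# Editor's illustrative sketch (added for clarity; not part of the original
# source): the splitter returns only the last delimiter-separated "word"
# before the cursor, which is what the completers then try to complete.
def _example_completion_splitter():
    splitter = CompletionSplitter()
    word = splitter.split_line("print(foo.ba", cursor_pos=12)
    # "(" is a delimiter but "." is not, so the word to complete is "foo.ba".
    return word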
957 class Completer(Configurable):
959 class Completer(Configurable):
958
960
959 greedy = Bool(
961 greedy = Bool(
960 False,
962 False,
961 help="""Activate greedy completion.
963 help="""Activate greedy completion.
962
964
963 .. deprecated:: 8.8
965 .. deprecated:: 8.8
964 Use :std:configtrait:`Completer.evaluation` and :std:configtrait:`Completer.auto_close_dict_keys` instead.
966 Use :std:configtrait:`Completer.evaluation` and :std:configtrait:`Completer.auto_close_dict_keys` instead.
965
967
966 When enabled in IPython 8.8 or newer, changes configuration as follows:
968 When enabled in IPython 8.8 or newer, changes configuration as follows:
967
969
968 - ``Completer.evaluation = 'unsafe'``
970 - ``Completer.evaluation = 'unsafe'``
969 - ``Completer.auto_close_dict_keys = True``
971 - ``Completer.auto_close_dict_keys = True``
970 """,
972 """,
971 ).tag(config=True)
973 ).tag(config=True)
972
974
973 evaluation = Enum(
975 evaluation = Enum(
974 ("forbidden", "minimal", "limited", "unsafe", "dangerous"),
976 ("forbidden", "minimal", "limited", "unsafe", "dangerous"),
975 default_value="limited",
977 default_value="limited",
976 help="""Policy for code evaluation under completion.
978 help="""Policy for code evaluation under completion.
977
979
978 Successive options allow enabling more eager evaluation for better
980 Successive options allow enabling more eager evaluation for better
979 completion suggestions, including for nested dictionaries, nested lists,
981 completion suggestions, including for nested dictionaries, nested lists,
980 or even results of function calls.
982 or even results of function calls.
981 Setting ``unsafe`` or higher can lead to evaluation of arbitrary user
983 Setting ``unsafe`` or higher can lead to evaluation of arbitrary user
982 code on :kbd:`Tab` with potentially unwanted or dangerous side effects.
984 code on :kbd:`Tab` with potentially unwanted or dangerous side effects.
983
985
984 Allowed values are:
986 Allowed values are:
985
987
986 - ``forbidden``: no evaluation of code is permitted,
988 - ``forbidden``: no evaluation of code is permitted,
987 - ``minimal``: evaluation of literals and access to built-in namespace;
989 - ``minimal``: evaluation of literals and access to built-in namespace;
988 no item/attribute evaluation, no access to locals/globals,
990 no item/attribute evaluation, no access to locals/globals,
989 no evaluation of any operations or comparisons.
991 no evaluation of any operations or comparisons.
990 - ``limited``: access to all namespaces, evaluation of hard-coded methods
992 - ``limited``: access to all namespaces, evaluation of hard-coded methods
991 (for example: :any:`dict.keys`, :any:`object.__getattr__`,
993 (for example: :any:`dict.keys`, :any:`object.__getattr__`,
992 :any:`object.__getitem__`) on allow-listed objects (for example:
994 :any:`object.__getitem__`) on allow-listed objects (for example:
993 :any:`dict`, :any:`list`, :any:`tuple`, ``pandas.Series``),
995 :any:`dict`, :any:`list`, :any:`tuple`, ``pandas.Series``),
994 - ``unsafe``: evaluation of all methods and function calls but not of
996 - ``unsafe``: evaluation of all methods and function calls but not of
995 syntax with side-effects like `del x`,
997 syntax with side-effects like `del x`,
996 - ``dangerous``: completely arbitrary evaluation.
998 - ``dangerous``: completely arbitrary evaluation.
997 """,
999 """,
998 ).tag(config=True)
1000 ).tag(config=True)
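# Editor's note (illustrative, added for clarity; not in the original source):
# being a traitlets option, the evaluation policy above can be tightened or
# relaxed from a configuration file or the command line, for example:
#
#     c.Completer.evaluation = "minimal"          # in ipython_config.py
#     ipython --Completer.evaluation=forbidden    # for a single session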
999
1001
1000 use_jedi = Bool(default_value=JEDI_INSTALLED,
1002 use_jedi = Bool(default_value=JEDI_INSTALLED,
1001 help="Experimental: Use Jedi to generate autocompletions. "
1003 help="Experimental: Use Jedi to generate autocompletions. "
1002 "Defaults to True if jedi is installed.").tag(config=True)
1004 "Defaults to True if jedi is installed.").tag(config=True)
1003
1005
1004 jedi_compute_type_timeout = Int(default_value=400,
1006 jedi_compute_type_timeout = Int(default_value=400,
1005 help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
1007 help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
1006 Set to 0 to stop computing types. A non-zero value lower than 100ms may hurt
1008 Set to 0 to stop computing types. A non-zero value lower than 100ms may hurt
1007 performance by preventing jedi from building its cache.
1009 performance by preventing jedi from building its cache.
1008 """).tag(config=True)
1010 """).tag(config=True)
1009
1011
1010 debug = Bool(default_value=False,
1012 debug = Bool(default_value=False,
1011 help='Enable debug for the Completer. Mostly print extra '
1013 help='Enable debug for the Completer. Mostly print extra '
1012 'information for experimental jedi integration.')\
1014 'information for experimental jedi integration.')\
1013 .tag(config=True)
1015 .tag(config=True)
1014
1016
1015 backslash_combining_completions = Bool(True,
1017 backslash_combining_completions = Bool(True,
1016 help="Enable unicode completions, e.g. \\alpha<tab> . "
1018 help="Enable unicode completions, e.g. \\alpha<tab> . "
1017 "Includes completion of latex commands, unicode names, and expanding "
1019 "Includes completion of latex commands, unicode names, and expanding "
1018 "unicode characters back to latex commands.").tag(config=True)
1020 "unicode characters back to latex commands.").tag(config=True)
1019
1021
1020 auto_close_dict_keys = Bool(
1022 auto_close_dict_keys = Bool(
1021 False,
1023 False,
1022 help="""
1024 help="""
1023 Enable auto-closing dictionary keys.
1025 Enable auto-closing dictionary keys.
1024
1026
1025 When enabled string keys will be suffixed with a final quote
1027 When enabled string keys will be suffixed with a final quote
1026 (matching the opening quote), tuple keys will also receive a
1028 (matching the opening quote), tuple keys will also receive a
1027 separating comma if needed, and keys which are final will
1029 separating comma if needed, and keys which are final will
1028 receive a closing bracket (``]``).
1030 receive a closing bracket (``]``).
1029 """,
1031 """,
1030 ).tag(config=True)
1032 ).tag(config=True)
1031
1033
1032 def __init__(self, namespace=None, global_namespace=None, **kwargs):
1034 def __init__(self, namespace=None, global_namespace=None, **kwargs):
1033 """Create a new completer for the command line.
1035 """Create a new completer for the command line.
1034
1036
1035 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
1037 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
1036
1038
1037 If unspecified, the default namespace where completions are performed
1039 If unspecified, the default namespace where completions are performed
1038 is __main__ (technically, __main__.__dict__). Namespaces should be
1040 is __main__ (technically, __main__.__dict__). Namespaces should be
1039 given as dictionaries.
1041 given as dictionaries.
1040
1042
1041 An optional second namespace can be given. This allows the completer
1043 An optional second namespace can be given. This allows the completer
1042 to handle cases where both the local and global scopes need to be
1044 to handle cases where both the local and global scopes need to be
1043 distinguished.
1045 distinguished.
1044 """
1046 """
1045
1047
1046 # Don't bind to namespace quite yet, but flag whether the user wants a
1048 # Don't bind to namespace quite yet, but flag whether the user wants a
1047 # specific namespace or to use __main__.__dict__. This will allow us
1049 # specific namespace or to use __main__.__dict__. This will allow us
1048 # to bind to __main__.__dict__ at completion time, not now.
1050 # to bind to __main__.__dict__ at completion time, not now.
1049 if namespace is None:
1051 if namespace is None:
1050 self.use_main_ns = True
1052 self.use_main_ns = True
1051 else:
1053 else:
1052 self.use_main_ns = False
1054 self.use_main_ns = False
1053 self.namespace = namespace
1055 self.namespace = namespace
1054
1056
1055 # The global namespace, if given, can be bound directly
1057 # The global namespace, if given, can be bound directly
1056 if global_namespace is None:
1058 if global_namespace is None:
1057 self.global_namespace = {}
1059 self.global_namespace = {}
1058 else:
1060 else:
1059 self.global_namespace = global_namespace
1061 self.global_namespace = global_namespace
1060
1062
1061 self.custom_matchers = []
1063 self.custom_matchers = []
1062
1064
1063 super(Completer, self).__init__(**kwargs)
1065 super(Completer, self).__init__(**kwargs)
1064
1066
1065 def complete(self, text, state):
1067 def complete(self, text, state):
1066 """Return the next possible completion for 'text'.
1068 """Return the next possible completion for 'text'.
1067
1069
1068 This is called successively with state == 0, 1, 2, ... until it
1070 This is called successively with state == 0, 1, 2, ... until it
1069 returns None. The completion should begin with 'text'.
1071 returns None. The completion should begin with 'text'.
1070
1072
1071 """
1073 """
1072 if self.use_main_ns:
1074 if self.use_main_ns:
1073 self.namespace = __main__.__dict__
1075 self.namespace = __main__.__dict__
1074
1076
1075 if state == 0:
1077 if state == 0:
1076 if "." in text:
1078 if "." in text:
1077 self.matches = self.attr_matches(text)
1079 self.matches = self.attr_matches(text)
1078 else:
1080 else:
1079 self.matches = self.global_matches(text)
1081 self.matches = self.global_matches(text)
1080 try:
1082 try:
1081 return self.matches[state]
1083 return self.matches[state]
1082 except IndexError:
1084 except IndexError:
1083 return None
1085 return None
1084
1086
1085 def global_matches(self, text):
1087 def global_matches(self, text):
1086 """Compute matches when text is a simple name.
1088 """Compute matches when text is a simple name.
1087
1089
1088 Return a list of all keywords, built-in functions and names currently
1090 Return a list of all keywords, built-in functions and names currently
1089 defined in self.namespace or self.global_namespace that match.
1091 defined in self.namespace or self.global_namespace that match.
1090
1092
1091 """
1093 """
1092 matches = []
1094 matches = []
1093 match_append = matches.append
1095 match_append = matches.append
1094 n = len(text)
1096 n = len(text)
1095 for lst in [
1097 for lst in [
1096 keyword.kwlist,
1098 keyword.kwlist,
1097 builtin_mod.__dict__.keys(),
1099 builtin_mod.__dict__.keys(),
1098 list(self.namespace.keys()),
1100 list(self.namespace.keys()),
1099 list(self.global_namespace.keys()),
1101 list(self.global_namespace.keys()),
1100 ]:
1102 ]:
1101 for word in lst:
1103 for word in lst:
1102 if word[:n] == text and word != "__builtins__":
1104 if word[:n] == text and word != "__builtins__":
1103 match_append(word)
1105 match_append(word)
1104
1106
1105 snake_case_re = re.compile(r"[^_]+(_[^_]+)+?\Z")
1107 snake_case_re = re.compile(r"[^_]+(_[^_]+)+?\Z")
1106 for lst in [list(self.namespace.keys()), list(self.global_namespace.keys())]:
1108 for lst in [list(self.namespace.keys()), list(self.global_namespace.keys())]:
1107 shortened = {
1109 shortened = {
1108 "_".join([sub[0] for sub in word.split("_")]): word
1110 "_".join([sub[0] for sub in word.split("_")]): word
1109 for word in lst
1111 for word in lst
1110 if snake_case_re.match(word)
1112 if snake_case_re.match(word)
1111 }
1113 }
1112 for word in shortened.keys():
1114 for word in shortened.keys():
1113 if word[:n] == text and word != "__builtins__":
1115 if word[:n] == text and word != "__builtins__":
1114 match_append(shortened[word])
1116 match_append(shortened[word])
1115 return matches
1117 return matches
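# Editor's note (illustrative, added for clarity; not in the original source):
# the "shortened" pass above lets an abbreviation built from the first letter
# of each snake_case segment match the full name, e.g. with ``my_long_variable``
# defined, typing ``m_l_v<tab>`` offers ``my_long_variable`` as a completion.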
1116
1118
1117 def attr_matches(self, text):
1119 def attr_matches(self, text):
1118 """Compute matches when text contains a dot.
1120 """Compute matches when text contains a dot.
1119
1121
1120 Assuming the text is of the form NAME.NAME....[NAME], and is
1122 Assuming the text is of the form NAME.NAME....[NAME], and is
1121 evaluatable in self.namespace or self.global_namespace, it will be
1123 evaluatable in self.namespace or self.global_namespace, it will be
1122 evaluated and its attributes (as revealed by dir()) are used as
1124 evaluated and its attributes (as revealed by dir()) are used as
1123 possible completions. (For class instances, class members are
1125 possible completions. (For class instances, class members are
1124 also considered.)
1126 also considered.)
1125
1127
1126 WARNING: this can still invoke arbitrary C code, if an object
1128 WARNING: this can still invoke arbitrary C code, if an object
1127 with a __getattr__ hook is evaluated.
1129 with a __getattr__ hook is evaluated.
1128
1130
1129 """
1131 """
1130 return self._attr_matches(text)[0]
1132 return self._attr_matches(text)[0]
1131
1133
1132 def _attr_matches(self, text, include_prefix=True) -> Tuple[Sequence[str], str]:
1134 def _attr_matches(self, text, include_prefix=True) -> Tuple[Sequence[str], str]:
1133 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
1135 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
1134 if not m2:
1136 if not m2:
1135 return [], ""
1137 return [], ""
1136 expr, attr = m2.group(1, 2)
1138 expr, attr = m2.group(1, 2)
1137
1139
1138 obj = self._evaluate_expr(expr)
1140 obj = self._evaluate_expr(expr)
1139
1141
1140 if obj is not_found:
1142 if obj is not_found:
1141 return [], ""
1143 return [], ""
1142
1144
1143 if self.limit_to__all__ and hasattr(obj, '__all__'):
1145 if self.limit_to__all__ and hasattr(obj, '__all__'):
1144 words = get__all__entries(obj)
1146 words = get__all__entries(obj)
1145 else:
1147 else:
1146 words = dir2(obj)
1148 words = dir2(obj)
1147
1149
1148 try:
1150 try:
1149 words = generics.complete_object(obj, words)
1151 words = generics.complete_object(obj, words)
1150 except TryNext:
1152 except TryNext:
1151 pass
1153 pass
1152 except AssertionError:
1154 except AssertionError:
1153 raise
1155 raise
1154 except Exception:
1156 except Exception:
1155 # Silence errors from completion function
1157 # Silence errors from completion function
1156 pass
1158 pass
1157 # Build match list to return
1159 # Build match list to return
1158 n = len(attr)
1160 n = len(attr)
1159
1161
1160 # Note: ideally we would just return words here and the prefix
1162 # Note: ideally we would just return words here and the prefix
1161 # reconciliator would know that we intend to append to rather than
1163 # reconciliator would know that we intend to append to rather than
1162 # replace the input text; this requires refactoring to return range
1164 # replace the input text; this requires refactoring to return range
1163 # which ought to be replaced (as does jedi).
1165 # which ought to be replaced (as does jedi).
1164 if include_prefix:
1166 if include_prefix:
1165 tokens = _parse_tokens(expr)
1167 tokens = _parse_tokens(expr)
1166 rev_tokens = reversed(tokens)
1168 rev_tokens = reversed(tokens)
1167 skip_over = {tokenize.ENDMARKER, tokenize.NEWLINE}
1169 skip_over = {tokenize.ENDMARKER, tokenize.NEWLINE}
1168 name_turn = True
1170 name_turn = True
1169
1171
1170 parts = []
1172 parts = []
1171 for token in rev_tokens:
1173 for token in rev_tokens:
1172 if token.type in skip_over:
1174 if token.type in skip_over:
1173 continue
1175 continue
1174 if token.type == tokenize.NAME and name_turn:
1176 if token.type == tokenize.NAME and name_turn:
1175 parts.append(token.string)
1177 parts.append(token.string)
1176 name_turn = False
1178 name_turn = False
1177 elif (
1179 elif (
1178 token.type == tokenize.OP and token.string == "." and not name_turn
1180 token.type == tokenize.OP and token.string == "." and not name_turn
1179 ):
1181 ):
1180 parts.append(token.string)
1182 parts.append(token.string)
1181 name_turn = True
1183 name_turn = True
1182 else:
1184 else:
1183 # stop once the token is neither a name nor a '.' operator
1185 # stop once the token is neither a name nor a '.' operator
1184 break
1186 break
1185
1187
1186 prefix_after_space = "".join(reversed(parts))
1188 prefix_after_space = "".join(reversed(parts))
1187 else:
1189 else:
1188 prefix_after_space = ""
1190 prefix_after_space = ""
1189
1191
1190 return (
1192 return (
1191 ["%s.%s" % (prefix_after_space, w) for w in words if w[:n] == attr],
1193 ["%s.%s" % (prefix_after_space, w) for w in words if w[:n] == attr],
1192 "." + attr,
1194 "." + attr,
1193 )
1195 )
1194
1196
1195 def _evaluate_expr(self, expr):
1197 def _evaluate_expr(self, expr):
1196 obj = not_found
1198 obj = not_found
1197 done = False
1199 done = False
1198 while not done and expr:
1200 while not done and expr:
1199 try:
1201 try:
1200 obj = guarded_eval(
1202 obj = guarded_eval(
1201 expr,
1203 expr,
1202 EvaluationContext(
1204 EvaluationContext(
1203 globals=self.global_namespace,
1205 globals=self.global_namespace,
1204 locals=self.namespace,
1206 locals=self.namespace,
1205 evaluation=self.evaluation,
1207 evaluation=self.evaluation,
1206 ),
1208 ),
1207 )
1209 )
1208 done = True
1210 done = True
1209 except Exception as e:
1211 except Exception as e:
1210 if self.debug:
1212 if self.debug:
1211 print("Evaluation exception", e)
1213 print("Evaluation exception", e)
1212 # trim the expression to remove any invalid prefix
1214 # trim the expression to remove any invalid prefix
1213 # e.g. user starts `(d[`, so we get `expr = '(d'`,
1215 # e.g. user starts `(d[`, so we get `expr = '(d'`,
1214 # where parenthesis is not closed.
1216 # where parenthesis is not closed.
1215 # TODO: make this faster by reusing parts of the computation?
1217 # TODO: make this faster by reusing parts of the computation?
1216 expr = expr[1:]
1218 expr = expr[1:]
1217 return obj
1219 return obj
1218
1220
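# --- Editor's illustrative sketch (added for clarity; not in the original) ---
# The base ``Completer`` follows the readline protocol: ``complete`` is called
# with increasing ``state`` values until it returns None.
def _example_completer_protocol():
    ns = {"alpha": 1, "alphabet": "abc"}
    completer = Completer(namespace=ns)
    assert completer.complete("alph", 0) == "alpha"
    assert completer.complete("alph", 1) == "alphabet"
    assert completer.complete("alph", 2) is None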
1219 def get__all__entries(obj):
1221 def get__all__entries(obj):
1220 """returns the strings in the __all__ attribute"""
1222 """returns the strings in the __all__ attribute"""
1221 try:
1223 try:
1222 words = getattr(obj, '__all__')
1224 words = getattr(obj, '__all__')
1223 except:
1225 except:
1224 return []
1226 return []
1225
1227
1226 return [w for w in words if isinstance(w, str)]
1228 return [w for w in words if isinstance(w, str)]
1227
1229
1228
1230
1229 class _DictKeyState(enum.Flag):
1231 class _DictKeyState(enum.Flag):
1230 """Represent state of the key match in context of other possible matches.
1232 """Represent state of the key match in context of other possible matches.
1231
1233
1232 - given `d1 = {'a': 1}` completion on `d1['<tab>` will yield `{'a': END_OF_ITEM}` as there is no tuple.
1234 - given `d1 = {'a': 1}` completion on `d1['<tab>` will yield `{'a': END_OF_ITEM}` as there is no tuple.
1233 - given `d2 = {('a', 'b'): 1}`: `d2['a', '<tab>` will yield `{'b': END_OF_TUPLE}` as there is no tuple members to add beyond `'b'`.
1235 - given `d2 = {('a', 'b'): 1}`: `d2['a', '<tab>` will yield `{'b': END_OF_TUPLE}` as there is no tuple members to add beyond `'b'`.
1234 - given `d3 = {('a', 'b'): 1}`: `d3['<tab>` will yield `{'a': IN_TUPLE}` as `'a'` can be added.
1236 - given `d3 = {('a', 'b'): 1}`: `d3['<tab>` will yield `{'a': IN_TUPLE}` as `'a'` can be added.
1235 - given `d4 = {'a': 1, ('a', 'b'): 2}`: `d4['<tab>` will yield `{'a': END_OF_ITEM | IN_TUPLE}`
1237 - given `d4 = {'a': 1, ('a', 'b'): 2}`: `d4['<tab>` will yield `{'a': END_OF_ITEM | IN_TUPLE}`
1236 """
1238 """
1237
1239
1238 BASELINE = 0
1240 BASELINE = 0
1239 END_OF_ITEM = enum.auto()
1241 END_OF_ITEM = enum.auto()
1240 END_OF_TUPLE = enum.auto()
1242 END_OF_TUPLE = enum.auto()
1241 IN_TUPLE = enum.auto()
1243 IN_TUPLE = enum.auto()
1242
1244
1243
1245
1244 def _parse_tokens(c):
1246 def _parse_tokens(c):
1245 """Parse tokens even if there is an error."""
1247 """Parse tokens even if there is an error."""
1246 tokens = []
1248 tokens = []
1247 token_generator = tokenize.generate_tokens(iter(c.splitlines()).__next__)
1249 token_generator = tokenize.generate_tokens(iter(c.splitlines()).__next__)
1248 while True:
1250 while True:
1249 try:
1251 try:
1250 tokens.append(next(token_generator))
1252 tokens.append(next(token_generator))
1251 except tokenize.TokenError:
1253 except tokenize.TokenError:
1252 return tokens
1254 return tokens
1253 except StopIteration:
1255 except StopIteration:
1254 return tokens
1256 return tokens
1255
1257
1256
1258
1257 def _match_number_in_dict_key_prefix(prefix: str) -> Union[str, None]:
1259 def _match_number_in_dict_key_prefix(prefix: str) -> Union[str, None]:
1258 """Match any valid Python numeric literal in a prefix of dictionary keys.
1260 """Match any valid Python numeric literal in a prefix of dictionary keys.
1259
1261
1260 References:
1262 References:
1261 - https://docs.python.org/3/reference/lexical_analysis.html#numeric-literals
1263 - https://docs.python.org/3/reference/lexical_analysis.html#numeric-literals
1262 - https://docs.python.org/3/library/tokenize.html
1264 - https://docs.python.org/3/library/tokenize.html
1263 """
1265 """
1264 if prefix[-1].isspace():
1266 if prefix[-1].isspace():
1265 # if user typed a space we do not have anything to complete
1267 # if user typed a space we do not have anything to complete
1266 # even if there was a valid number token before
1268 # even if there was a valid number token before
1267 return None
1269 return None
1268 tokens = _parse_tokens(prefix)
1270 tokens = _parse_tokens(prefix)
1269 rev_tokens = reversed(tokens)
1271 rev_tokens = reversed(tokens)
1270 skip_over = {tokenize.ENDMARKER, tokenize.NEWLINE}
1272 skip_over = {tokenize.ENDMARKER, tokenize.NEWLINE}
1271 number = None
1273 number = None
1272 for token in rev_tokens:
1274 for token in rev_tokens:
1273 if token.type in skip_over:
1275 if token.type in skip_over:
1274 continue
1276 continue
1275 if number is None:
1277 if number is None:
1276 if token.type == tokenize.NUMBER:
1278 if token.type == tokenize.NUMBER:
1277 number = token.string
1279 number = token.string
1278 continue
1280 continue
1279 else:
1281 else:
1280 # we did not match a number
1282 # we did not match a number
1281 return None
1283 return None
1282 if token.type == tokenize.OP:
1284 if token.type == tokenize.OP:
1283 if token.string == ",":
1285 if token.string == ",":
1284 break
1286 break
1285 if token.string in {"+", "-"}:
1287 if token.string in {"+", "-"}:
1286 number = token.string + number
1288 number = token.string + number
1287 else:
1289 else:
1288 return None
1290 return None
1289 return number
1291 return number
1290
1292
1291
1293
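# Editor's note (illustrative, added for clarity; not in the original source):
# representative inputs for the helper above, where the argument is the key
# text typed after the opening bracket:
#
#     _match_number_in_dict_key_prefix("-12")   -> "-12"
#     _match_number_in_dict_key_prefix("0xff")  -> "0xff"
#     _match_number_in_dict_key_prefix("foo")   -> None  (not a numeric literal)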
1292 _INT_FORMATS = {
1294 _INT_FORMATS = {
1293 "0b": bin,
1295 "0b": bin,
1294 "0o": oct,
1296 "0o": oct,
1295 "0x": hex,
1297 "0x": hex,
1296 }
1298 }
1297
1299
1298
1300
1299 def match_dict_keys(
1301 def match_dict_keys(
1300 keys: List[Union[str, bytes, Tuple[Union[str, bytes], ...]]],
1302 keys: List[Union[str, bytes, Tuple[Union[str, bytes], ...]]],
1301 prefix: str,
1303 prefix: str,
1302 delims: str,
1304 delims: str,
1303 extra_prefix: Optional[Tuple[Union[str, bytes], ...]] = None,
1305 extra_prefix: Optional[Tuple[Union[str, bytes], ...]] = None,
1304 ) -> Tuple[str, int, Dict[str, _DictKeyState]]:
1306 ) -> Tuple[str, int, Dict[str, _DictKeyState]]:
1305 """Used by dict_key_matches, matching the prefix to a list of keys
1307 """Used by dict_key_matches, matching the prefix to a list of keys
1306
1308
1307 Parameters
1309 Parameters
1308 ----------
1310 ----------
1309 keys
1311 keys
1310 list of keys in dictionary currently being completed.
1312 list of keys in dictionary currently being completed.
1311 prefix
1313 prefix
1312 Part of the text already typed by the user. E.g. `mydict[b'fo`
1314 Part of the text already typed by the user. E.g. `mydict[b'fo`
1313 delims
1315 delims
1314 String of delimiters to consider when finding the current key.
1316 String of delimiters to consider when finding the current key.
1315 extra_prefix : optional
1317 extra_prefix : optional
1316 Part of the text already typed in multi-key index cases. E.g. for
1318 Part of the text already typed in multi-key index cases. E.g. for
1317 `mydict['foo', "bar", 'b`, this would be `('foo', 'bar')`.
1319 `mydict['foo', "bar", 'b`, this would be `('foo', 'bar')`.
1318
1320
1319 Returns
1321 Returns
1320 -------
1322 -------
1321 A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
1323 A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
1322 ``quote`` being the quote that needs to be used to close the current string,
1324 ``quote`` being the quote that needs to be used to close the current string,
1323 ``token_start`` the position where the replacement should start occurring,
1325 ``token_start`` the position where the replacement should start occurring,
1324 ``matched`` a dictionary mapping each replacement/completion string to a
1326 ``matched`` a dictionary mapping each replacement/completion string to a
1325 value indicating the state of that key match.
1327 value indicating the state of that key match.
1326 """
1328 """
1327 prefix_tuple = extra_prefix if extra_prefix else ()
1329 prefix_tuple = extra_prefix if extra_prefix else ()
1328
1330
1329 prefix_tuple_size = sum(
1331 prefix_tuple_size = sum(
1330 [
1332 [
1331 # for pandas, do not count slices as taking space
1333 # for pandas, do not count slices as taking space
1332 not isinstance(k, slice)
1334 not isinstance(k, slice)
1333 for k in prefix_tuple
1335 for k in prefix_tuple
1334 ]
1336 ]
1335 )
1337 )
1336 text_serializable_types = (str, bytes, int, float, slice)
1338 text_serializable_types = (str, bytes, int, float, slice)
1337
1339
1338 def filter_prefix_tuple(key):
1340 def filter_prefix_tuple(key):
1339 # Reject too short keys
1341 # Reject too short keys
1340 if len(key) <= prefix_tuple_size:
1342 if len(key) <= prefix_tuple_size:
1341 return False
1343 return False
1342 # Reject keys which cannot be serialised to text
1344 # Reject keys which cannot be serialised to text
1343 for k in key:
1345 for k in key:
1344 if not isinstance(k, text_serializable_types):
1346 if not isinstance(k, text_serializable_types):
1345 return False
1347 return False
1346 # Reject keys that do not match the prefix
1348 # Reject keys that do not match the prefix
1347 for k, pt in zip(key, prefix_tuple):
1349 for k, pt in zip(key, prefix_tuple):
1348 if k != pt and not isinstance(pt, slice):
1350 if k != pt and not isinstance(pt, slice):
1349 return False
1351 return False
1350 # All checks passed!
1352 # All checks passed!
1351 return True
1353 return True
1352
1354
1353 filtered_key_is_final: Dict[Union[str, bytes, int, float], _DictKeyState] = (
1355 filtered_key_is_final: Dict[Union[str, bytes, int, float], _DictKeyState] = (
1354 defaultdict(lambda: _DictKeyState.BASELINE)
1356 defaultdict(lambda: _DictKeyState.BASELINE)
1355 )
1357 )
1356
1358
1357 for k in keys:
1359 for k in keys:
1358 # If at least one of the matches is not final, mark as undetermined.
1360 # If at least one of the matches is not final, mark as undetermined.
1359 # This can happen with `d = {111: 'b', (111, 222): 'a'}` where
1361 # This can happen with `d = {111: 'b', (111, 222): 'a'}` where
1360 # `111` appears final on first match but is not final on the second.
1362 # `111` appears final on first match but is not final on the second.
1361
1363
1362 if isinstance(k, tuple):
1364 if isinstance(k, tuple):
1363 if filter_prefix_tuple(k):
1365 if filter_prefix_tuple(k):
1364 key_fragment = k[prefix_tuple_size]
1366 key_fragment = k[prefix_tuple_size]
1365 filtered_key_is_final[key_fragment] |= (
1367 filtered_key_is_final[key_fragment] |= (
1366 _DictKeyState.END_OF_TUPLE
1368 _DictKeyState.END_OF_TUPLE
1367 if len(k) == prefix_tuple_size + 1
1369 if len(k) == prefix_tuple_size + 1
1368 else _DictKeyState.IN_TUPLE
1370 else _DictKeyState.IN_TUPLE
1369 )
1371 )
1370 elif prefix_tuple_size > 0:
1372 elif prefix_tuple_size > 0:
1371 # we are completing a tuple but this key is not a tuple,
1373 # we are completing a tuple but this key is not a tuple,
1372 # so we should ignore it
1374 # so we should ignore it
1373 pass
1375 pass
1374 else:
1376 else:
1375 if isinstance(k, text_serializable_types):
1377 if isinstance(k, text_serializable_types):
1376 filtered_key_is_final[k] |= _DictKeyState.END_OF_ITEM
1378 filtered_key_is_final[k] |= _DictKeyState.END_OF_ITEM
1377
1379
1378 filtered_keys = filtered_key_is_final.keys()
1380 filtered_keys = filtered_key_is_final.keys()
1379
1381
1380 if not prefix:
1382 if not prefix:
1381 return "", 0, {repr(k): v for k, v in filtered_key_is_final.items()}
1383 return "", 0, {repr(k): v for k, v in filtered_key_is_final.items()}
1382
1384
1383 quote_match = re.search("(?:\"|')", prefix)
1385 quote_match = re.search("(?:\"|')", prefix)
1384 is_user_prefix_numeric = False
1386 is_user_prefix_numeric = False
1385
1387
1386 if quote_match:
1388 if quote_match:
1387 quote = quote_match.group()
1389 quote = quote_match.group()
1388 valid_prefix = prefix + quote
1390 valid_prefix = prefix + quote
1389 try:
1391 try:
1390 prefix_str = literal_eval(valid_prefix)
1392 prefix_str = literal_eval(valid_prefix)
1391 except Exception:
1393 except Exception:
1392 return "", 0, {}
1394 return "", 0, {}
1393 else:
1395 else:
1394 # If it does not look like a string, let's assume
1396 # If it does not look like a string, let's assume
1395 # we are dealing with a number or variable.
1397 # we are dealing with a number or variable.
1396 number_match = _match_number_in_dict_key_prefix(prefix)
1398 number_match = _match_number_in_dict_key_prefix(prefix)
1397
1399
1398 # We do not want the key matcher to suggest variable names so we yield:
1400 # We do not want the key matcher to suggest variable names so we yield:
1399 if number_match is None:
1401 if number_match is None:
1400 # The alternative would be to assume that the user forgot the quote
1402 # The alternative would be to assume that the user forgot the quote
1401 # and if the substring matches, suggest adding it at the start.
1403 # and if the substring matches, suggest adding it at the start.
1402 return "", 0, {}
1404 return "", 0, {}
1403
1405
1404 prefix_str = number_match
1406 prefix_str = number_match
1405 is_user_prefix_numeric = True
1407 is_user_prefix_numeric = True
1406 quote = ""
1408 quote = ""
1407
1409
1408 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
1410 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
1409 token_match = re.search(pattern, prefix, re.UNICODE)
1411 token_match = re.search(pattern, prefix, re.UNICODE)
1410 assert token_match is not None # silence mypy
1412 assert token_match is not None # silence mypy
1411 token_start = token_match.start()
1413 token_start = token_match.start()
1412 token_prefix = token_match.group()
1414 token_prefix = token_match.group()
1413
1415
1414 matched: Dict[str, _DictKeyState] = {}
1416 matched: Dict[str, _DictKeyState] = {}
1415
1417
1416 str_key: Union[str, bytes]
1418 str_key: Union[str, bytes]
1417
1419
1418 for key in filtered_keys:
1420 for key in filtered_keys:
1419 if isinstance(key, (int, float)):
1421 if isinstance(key, (int, float)):
1420 # User typed a string prefix but this key is a number; skip it.
1422 # User typed a string prefix but this key is a number; skip it.
1421 if not is_user_prefix_numeric:
1423 if not is_user_prefix_numeric:
1422 continue
1424 continue
1423 str_key = str(key)
1425 str_key = str(key)
1424 if isinstance(key, int):
1426 if isinstance(key, int):
1425 int_base = prefix_str[:2].lower()
1427 int_base = prefix_str[:2].lower()
1426 # if user typed integer using binary/oct/hex notation:
1428 # if user typed integer using binary/oct/hex notation:
1427 if int_base in _INT_FORMATS:
1429 if int_base in _INT_FORMATS:
1428 int_format = _INT_FORMATS[int_base]
1430 int_format = _INT_FORMATS[int_base]
1429 str_key = int_format(key)
1431 str_key = int_format(key)
1430 else:
1432 else:
1431 # User typed a numeric prefix but this key is not a number; skip it.
1433 # User typed a numeric prefix but this key is not a number; skip it.
1432 if is_user_prefix_numeric:
1434 if is_user_prefix_numeric:
1433 continue
1435 continue
1434 str_key = key
1436 str_key = key
1435 try:
1437 try:
1436 if not str_key.startswith(prefix_str):
1438 if not str_key.startswith(prefix_str):
1437 continue
1439 continue
1438 except (AttributeError, TypeError, UnicodeError) as e:
1440 except (AttributeError, TypeError, UnicodeError) as e:
1439 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
1441 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
1440 continue
1442 continue
1441
1443
1442 # reformat remainder of key to begin with prefix
1444 # reformat remainder of key to begin with prefix
1443 rem = str_key[len(prefix_str) :]
1445 rem = str_key[len(prefix_str) :]
1444 # force repr wrapped in '
1446 # force repr wrapped in '
1445 rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
1447 rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
1446 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
1448 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
1447 if quote == '"':
1449 if quote == '"':
1448 # The entered prefix is quoted with ",
1450 # The entered prefix is quoted with ",
1449 # but the match is quoted with '.
1451 # but the match is quoted with '.
1450 # A contained " hence needs escaping for comparison:
1452 # A contained " hence needs escaping for comparison:
1451 rem_repr = rem_repr.replace('"', '\\"')
1453 rem_repr = rem_repr.replace('"', '\\"')
1452
1454
1453 # then reinsert prefix from start of token
1455 # then reinsert prefix from start of token
1454 match = "%s%s" % (token_prefix, rem_repr)
1456 match = "%s%s" % (token_prefix, rem_repr)
1455
1457
1456 matched[match] = filtered_key_is_final[key]
1458 matched[match] = filtered_key_is_final[key]
1457 return quote, token_start, matched
1459 return quote, token_start, matched
1458
1460
1459
1461
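# --- Editor's illustrative sketch (added for clarity; not in the original) ---
# Example of ``match_dict_keys`` with both a plain key and a tuple key.
def _example_match_dict_keys():
    keys = ["foo", "food", ("foo", "bar")]
    quote, token_start, matched = match_dict_keys(keys, "'fo", DELIMS)
    # quote == "'" (the quote that will close the key),
    # token_start == 1 (replacement starts right after the opening quote),
    # matched == {"foo": ..., "food": ...}, where the value for "foo" flags it
    # both as a complete key (END_OF_ITEM) and as the first element of the
    # tuple key ("foo", "bar") (IN_TUPLE).
    return quote, token_start, matched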
1460 def cursor_to_position(text:str, line:int, column:int)->int:
1462 def cursor_to_position(text:str, line:int, column:int)->int:
1461 """
1463 """
1462 Convert the (line,column) position of the cursor in text to an offset in a
1464 Convert the (line,column) position of the cursor in text to an offset in a
1463 string.
1465 string.
1464
1466
1465 Parameters
1467 Parameters
1466 ----------
1468 ----------
1467 text : str
1469 text : str
1468 The text in which to calculate the cursor offset
1470 The text in which to calculate the cursor offset
1469 line : int
1471 line : int
1470 Line of the cursor; 0-indexed
1472 Line of the cursor; 0-indexed
1471 column : int
1473 column : int
1472 Column of the cursor 0-indexed
1474 Column of the cursor 0-indexed
1473
1475
1474 Returns
1476 Returns
1475 -------
1477 -------
1476 Position of the cursor in ``text``, 0-indexed.
1478 Position of the cursor in ``text``, 0-indexed.
1477
1479
1478 See Also
1480 See Also
1479 --------
1481 --------
1480 position_to_cursor : reciprocal of this function
1482 position_to_cursor : reciprocal of this function
1481
1483
1482 """
1484 """
1483 lines = text.split('\n')
1485 lines = text.split('\n')
1484 assert line <= len(lines), '{} <= {}'.format(str(line), str(len(lines)))
1486 assert line <= len(lines), '{} <= {}'.format(str(line), str(len(lines)))
1485
1487
1486 return sum(len(l) + 1 for l in lines[:line]) + column
1488 return sum(len(l) + 1 for l in lines[:line]) + column
1487
1489
1488 def position_to_cursor(text:str, offset:int)->Tuple[int, int]:
1490 def position_to_cursor(text:str, offset:int)->Tuple[int, int]:
1489 """
1491 """
1490 Convert the position of the cursor in text (0-indexed) to a line
1492 Convert the position of the cursor in text (0-indexed) to a line
1491 number (0-indexed) and a column number (0-indexed) pair
1493 number (0-indexed) and a column number (0-indexed) pair
1492
1494
1493 Position should be a valid position in ``text``.
1495 Position should be a valid position in ``text``.
1494
1496
1495 Parameters
1497 Parameters
1496 ----------
1498 ----------
1497 text : str
1499 text : str
1498 The text in which to calculate the cursor offset
1500 The text in which to calculate the cursor offset
1499 offset : int
1501 offset : int
1500 Position of the cursor in ``text``, 0-indexed.
1502 Position of the cursor in ``text``, 0-indexed.
1501
1503
1502 Returns
1504 Returns
1503 -------
1505 -------
1504 (line, column) : (int, int)
1506 (line, column) : (int, int)
1505 Line of the cursor; 0-indexed, column of the cursor 0-indexed
1507 Line of the cursor; 0-indexed, column of the cursor 0-indexed
1506
1508
1507 See Also
1509 See Also
1508 --------
1510 --------
1509 cursor_to_position : reciprocal of this function
1511 cursor_to_position : reciprocal of this function
1510
1512
1511 """
1513 """
1512
1514
1513 assert 0 <= offset <= len(text) , "0 <= %s <= %s" % (offset , len(text))
1515 assert 0 <= offset <= len(text) , "0 <= %s <= %s" % (offset , len(text))
1514
1516
1515 before = text[:offset]
1517 before = text[:offset]
1516 blines = before.split('\n') # ! splitlines trims trailing \n
1518 blines = before.split('\n') # ! splitlines trims trailing \n
1517 line = before.count('\n')
1519 line = before.count('\n')
1518 col = len(blines[-1])
1520 col = len(blines[-1])
1519 return line, col
1521 return line, col
1520
1522
1521
1523
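# Editor's illustrative sketch (added for clarity; not in the original source):
# the two helpers above are inverses of each other.
def _example_cursor_position_roundtrip():
    text = "ab\ncd"
    offset = cursor_to_position(text, line=1, column=1)  # points at "d"
    assert offset == 4
    assert position_to_cursor(text, offset) == (1, 1)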
1522 def _safe_isinstance(obj, module, class_name, *attrs):
1524 def _safe_isinstance(obj, module, class_name, *attrs):
1523 """Checks if obj is an instance of module.class_name if loaded
1525 """Checks if obj is an instance of module.class_name if loaded
1524 """
1526 """
1525 if module in sys.modules:
1527 if module in sys.modules:
1526 m = sys.modules[module]
1528 m = sys.modules[module]
1527 for attr in [class_name, *attrs]:
1529 for attr in [class_name, *attrs]:
1528 m = getattr(m, attr)
1530 m = getattr(m, attr)
1529 return isinstance(obj, m)
1531 return isinstance(obj, m)
1530
1532
1531
1533
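# Editor's illustrative sketch (added for clarity; not in the original source):
# ``_safe_isinstance`` never imports the module itself; it returns None when
# the module is not loaded yet and performs a real isinstance() check otherwise.
def _example_safe_isinstance(obj):
    return _safe_isinstance(obj, "numpy", "ndarray")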
1532 @context_matcher()
1534 @context_matcher()
1533 def back_unicode_name_matcher(context: CompletionContext):
1535 def back_unicode_name_matcher(context: CompletionContext):
1534 """Match Unicode characters back to Unicode name
1536 """Match Unicode characters back to Unicode name
1535
1537
1536 Same as :any:`back_unicode_name_matches`, but adapted to the new Matcher API.
1538 Same as :any:`back_unicode_name_matches`, but adapted to the new Matcher API.
1537 """
1539 """
1538 fragment, matches = back_unicode_name_matches(context.text_until_cursor)
1540 fragment, matches = back_unicode_name_matches(context.text_until_cursor)
1539 return _convert_matcher_v1_result_to_v2(
1541 return _convert_matcher_v1_result_to_v2(
1540 matches, type="unicode", fragment=fragment, suppress_if_matches=True
1542 matches, type="unicode", fragment=fragment, suppress_if_matches=True
1541 )
1543 )
1542
1544
1543
1545
1544 def back_unicode_name_matches(text: str) -> Tuple[str, Sequence[str]]:
1546 def back_unicode_name_matches(text: str) -> Tuple[str, Sequence[str]]:
1545 """Match Unicode characters back to Unicode name
1547 """Match Unicode characters back to Unicode name
1546
1548
1547 This does ``β˜ƒ`` -> ``\\snowman``
1549 This does ``β˜ƒ`` -> ``\\snowman``
1548
1550
1549 Note that snowman is not a valid python3 combining character but will be expanded.
1551 Note that snowman is not a valid python3 combining character but will be expanded.
1550 Though it will not be recombined back into the snowman character by the completion machinery.
1552 Though it will not be recombined back into the snowman character by the completion machinery.
1551
1553
1552 Nor will this back-complete standard sequences like \\n, \\b ...
1554 Nor will this back-complete standard sequences like \\n, \\b ...
1553
1555
1554 .. deprecated:: 8.6
1556 .. deprecated:: 8.6
1555 You can use :meth:`back_unicode_name_matcher` instead.
1557 You can use :meth:`back_unicode_name_matcher` instead.
1556
1558
1557 Returns
1559 Returns
1558 =======
1560 =======
1559
1561
1560 Return a tuple with two elements:
1562 Return a tuple with two elements:
1561
1563
1562 - The Unicode character that was matched (preceded with a backslash), or
1564 - The Unicode character that was matched (preceded with a backslash), or
1563 empty string,
1565 empty string,
1564 - a sequence (of 1), name for the match Unicode character, preceded by
1566 - a sequence (of 1), name for the match Unicode character, preceded by
1565 backslash, or empty if no match.
1567 backslash, or empty if no match.
1566 """
1568 """
1567 if len(text)<2:
1569 if len(text)<2:
1568 return '', ()
1570 return '', ()
1569 maybe_slash = text[-2]
1571 maybe_slash = text[-2]
1570 if maybe_slash != '\\':
1572 if maybe_slash != '\\':
1571 return '', ()
1573 return '', ()
1572
1574
1573 char = text[-1]
1575 char = text[-1]
1574 # no expand on quote for completion in strings.
1576 # no expand on quote for completion in strings.
1575 # nor backcomplete standard ascii keys
1577 # nor backcomplete standard ascii keys
1576 if char in string.ascii_letters or char in ('"',"'"):
1578 if char in string.ascii_letters or char in ('"',"'"):
1577 return '', ()
1579 return '', ()
1578 try :
1580 try :
1579 unic = unicodedata.name(char)
1581 unic = unicodedata.name(char)
1580 return '\\'+char,('\\'+unic,)
1582 return '\\'+char,('\\'+unic,)
1581 except KeyError:
1583 except KeyError:
1582 pass
1584 pass
1583 return '', ()
1585 return '', ()
1584
1586
1585
1587
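# --- Editor's illustrative sketch (added for clarity; not in the original) ---
def _example_back_unicode_name_matches():
    fragment, matches = back_unicode_name_matches("x = \\β˜ƒ")
    # fragment == "\\β˜ƒ" and matches == ("\\SNOWMAN",), i.e. the character can
    # be expanded back to its Unicode name.
    return fragment, matches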
1586 @context_matcher()
1588 @context_matcher()
1587 def back_latex_name_matcher(context: CompletionContext):
1589 def back_latex_name_matcher(context: CompletionContext):
1588 """Match latex characters back to unicode name
1590 """Match latex characters back to unicode name
1589
1591
1590 Same as :any:`back_latex_name_matches`, but adapted to the new Matcher API.
1592 Same as :any:`back_latex_name_matches`, but adapted to the new Matcher API.
1591 """
1593 """
1592 fragment, matches = back_latex_name_matches(context.text_until_cursor)
1594 fragment, matches = back_latex_name_matches(context.text_until_cursor)
1593 return _convert_matcher_v1_result_to_v2(
1595 return _convert_matcher_v1_result_to_v2(
1594 matches, type="latex", fragment=fragment, suppress_if_matches=True
1596 matches, type="latex", fragment=fragment, suppress_if_matches=True
1595 )
1597 )
1596
1598
1597
1599
1598 def back_latex_name_matches(text: str) -> Tuple[str, Sequence[str]]:
1600 def back_latex_name_matches(text: str) -> Tuple[str, Sequence[str]]:
1599 """Match latex characters back to unicode name
1601 """Match latex characters back to unicode name
1600
1602
1601 This does ``\\β„΅`` -> ``\\aleph``
1603 This does ``\\β„΅`` -> ``\\aleph``
1602
1604
1603 .. deprecated:: 8.6
1605 .. deprecated:: 8.6
1604 You can use :meth:`back_latex_name_matcher` instead.
1606 You can use :meth:`back_latex_name_matcher` instead.
1605 """
1607 """
1606 if len(text)<2:
1608 if len(text)<2:
1607 return '', ()
1609 return '', ()
1608 maybe_slash = text[-2]
1610 maybe_slash = text[-2]
1609 if maybe_slash != '\\':
1611 if maybe_slash != '\\':
1610 return '', ()
1612 return '', ()
1611
1613
1612
1614
1613 char = text[-1]
1615 char = text[-1]
1614 # no expand on quote for completion in strings.
1616 # no expand on quote for completion in strings.
1615 # nor backcomplete standard ascii keys
1617 # nor backcomplete standard ascii keys
1616 if char in string.ascii_letters or char in ('"',"'"):
1618 if char in string.ascii_letters or char in ('"',"'"):
1617 return '', ()
1619 return '', ()
1618 try :
1620 try :
1619 latex = reverse_latex_symbol[char]
1621 latex = reverse_latex_symbol[char]
1620 # '\\' replace the \ as well
1622 # '\\' replace the \ as well
1621 return '\\'+char,[latex]
1623 return '\\'+char,[latex]
1622 except KeyError:
1624 except KeyError:
1623 pass
1625 pass
1624 return '', ()
1626 return '', ()
1625
1627
1626
1628
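# Editor's illustrative sketch (added for clarity; not in the original source):
def _example_back_latex_name_matches():
    fragment, matches = back_latex_name_matches("x = \\β„΅")
    # fragment == "\\β„΅" and matches == ["\\aleph"], mirroring the docstring above.
    return fragment, matches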
1627 def _formatparamchildren(parameter) -> str:
1629 def _formatparamchildren(parameter) -> str:
1628 """
1630 """
1629 Get parameter name and value from Jedi Private API
1631 Get parameter name and value from Jedi Private API
1630
1632
1631 Jedi does not expose a simple way to get `param=value` from its API.
1633 Jedi does not expose a simple way to get `param=value` from its API.
1632
1634
1633 Parameters
1635 Parameters
1634 ----------
1636 ----------
1635 parameter
1637 parameter
1636 Jedi's function `Param`
1638 Jedi's function `Param`
1637
1639
1638 Returns
1640 Returns
1639 -------
1641 -------
1640 A string like 'a', 'b=1', '*args', '**kwargs'
1642 A string like 'a', 'b=1', '*args', '**kwargs'
1641
1643
1642 """
1644 """
1643 description = parameter.description
1645 description = parameter.description
1644 if not description.startswith('param '):
1646 if not description.startswith('param '):
1645 raise ValueError('Jedi function parameter description has changed format. '
1647 raise ValueError('Jedi function parameter description has changed format. '
1646 'Expected "param ...", found %r.' % description)
1648 'Expected "param ...", found %r.' % description)
1647 return description[6:]
1649 return description[6:]
1648
1650
1649 def _make_signature(completion)-> str:
1651 def _make_signature(completion)-> str:
1650 """
1652 """
1651 Make the signature from a jedi completion
1653 Make the signature from a jedi completion
1652
1654
1653 Parameters
1655 Parameters
1654 ----------
1656 ----------
1655 completion : jedi.Completion
1657 completion : jedi.Completion
1656 object does not complete a function type
1658 object does not complete a function type
1657
1659
1658 Returns
1660 Returns
1659 -------
1661 -------
1660 a string consisting of the function signature, with the parentheses but
1662 a string consisting of the function signature, with the parentheses but
1661 without the function name. Example:
1663 without the function name. Example:
1662 `(a, *args, b=1, **kwargs)`
1664 `(a, *args, b=1, **kwargs)`
1663
1665
1664 """
1666 """
1665
1667
1666 # it looks like this might work on jedi 0.17
1668 # it looks like this might work on jedi 0.17
1667 if hasattr(completion, 'get_signatures'):
1669 if hasattr(completion, 'get_signatures'):
1668 signatures = completion.get_signatures()
1670 signatures = completion.get_signatures()
1669 if not signatures:
1671 if not signatures:
1670 return '(?)'
1672 return '(?)'
1671
1673
1672 c0 = completion.get_signatures()[0]
1674 c0 = completion.get_signatures()[0]
1673 return '('+c0.to_string().split('(', maxsplit=1)[1]
1675 return '('+c0.to_string().split('(', maxsplit=1)[1]
1674
1676
1675 return '(%s)'% ', '.join([f for f in (_formatparamchildren(p) for signature in completion.get_signatures()
1677 return '(%s)'% ', '.join([f for f in (_formatparamchildren(p) for signature in completion.get_signatures()
1676 for p in signature.defined_names()) if f])
1678 for p in signature.defined_names()) if f])
1677
1679
1678
1680
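# Editor's note (illustrative, added for clarity; not in the original source):
# for a Jedi completion that resolves to a callable such as ``json.dumps``,
# ``_make_signature`` returns something like
# ``(obj, *, skipkeys=False, ensure_ascii=True, ...)``, i.e. the parameter list
# in parentheses without the function name; completions without a signature
# yield ``'(?)'``.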
1679 _CompleteResult = Dict[str, MatcherResult]
1681 _CompleteResult = Dict[str, MatcherResult]
1680
1682
1681
1683
1682 DICT_MATCHER_REGEX = re.compile(
1684 DICT_MATCHER_REGEX = re.compile(
1683 r"""(?x)
1685 r"""(?x)
1684 ( # match dict-referring - or any get item object - expression
1686 ( # match dict-referring - or any get item object - expression
1685 .+
1687 .+
1686 )
1688 )
1687 \[ # open bracket
1689 \[ # open bracket
1688 \s* # and optional whitespace
1690 \s* # and optional whitespace
1689 # Capture any number of serializable objects (e.g. "a", "b", 'c')
1691 # Capture any number of serializable objects (e.g. "a", "b", 'c')
1690 # and slices
1692 # and slices
1691 ((?:(?:
1693 ((?:(?:
1692 (?: # closed string
1694 (?: # closed string
1693 [uUbB]? # string prefix (r not handled)
1695 [uUbB]? # string prefix (r not handled)
1694 (?:
1696 (?:
1695 '(?:[^']|(?<!\\)\\')*'
1697 '(?:[^']|(?<!\\)\\')*'
1696 |
1698 |
1697 "(?:[^"]|(?<!\\)\\")*"
1699 "(?:[^"]|(?<!\\)\\")*"
1698 )
1700 )
1699 )
1701 )
1700 |
1702 |
1701 # capture integers and slices
1703 # capture integers and slices
1702 (?:[-+]?\d+)?(?::(?:[-+]?\d+)?){0,2}
1704 (?:[-+]?\d+)?(?::(?:[-+]?\d+)?){0,2}
1703 |
1705 |
1704 # integer in bin/hex/oct notation
1706 # integer in bin/hex/oct notation
1705 0[bBxXoO]_?(?:\w|\d)+
1707 0[bBxXoO]_?(?:\w|\d)+
1706 )
1708 )
1707 \s*,\s*
1709 \s*,\s*
1708 )*)
1710 )*)
1709 ((?:
1711 ((?:
1710 (?: # unclosed string
1712 (?: # unclosed string
1711 [uUbB]? # string prefix (r not handled)
1713 [uUbB]? # string prefix (r not handled)
1712 (?:
1714 (?:
1713 '(?:[^']|(?<!\\)\\')*
1715 '(?:[^']|(?<!\\)\\')*
1714 |
1716 |
1715 "(?:[^"]|(?<!\\)\\")*
1717 "(?:[^"]|(?<!\\)\\")*
1716 )
1718 )
1717 )
1719 )
1718 |
1720 |
1719 # unfinished integer
1721 # unfinished integer
1720 (?:[-+]?\d+)
1722 (?:[-+]?\d+)
1721 |
1723 |
1722 # integer in bin/hex/oct notation
1724 # integer in bin/hex/oct notation
1723 0[bBxXoO]_?(?:\w|\d)+
1725 0[bBxXoO]_?(?:\w|\d)+
1724 )
1726 )
1725 )?
1727 )?
1726 $
1728 $
1727 """
1729 """
1728 )
1730 )
1729
1731
1730
1732
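# Editor's note (illustrative, added for clarity; not in the original source):
# what the pattern above captures for a typical dict-key completion request.
def _example_dict_matcher_regex():
    m = DICT_MATCHER_REGEX.search("data['foo', 'ba")
    assert m is not None
    # m.group(1) == "data"      -> the expression being indexed
    # m.group(2) == "'foo', "   -> keys that are already closed
    # m.group(3) == "'ba"       -> the unclosed key currently being completed
    return m.groups()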
1731 def _convert_matcher_v1_result_to_v2(
1733 def _convert_matcher_v1_result_to_v2(
1732 matches: Sequence[str],
1734 matches: Sequence[str],
1733 type: str,
1735 type: str,
1734 fragment: Optional[str] = None,
1736 fragment: Optional[str] = None,
1735 suppress_if_matches: bool = False,
1737 suppress_if_matches: bool = False,
1736 ) -> SimpleMatcherResult:
1738 ) -> SimpleMatcherResult:
1737 """Utility to help with transition"""
1739 """Utility to help with transition"""
1738 result = {
1740 result = {
1739 "completions": [SimpleCompletion(text=match, type=type) for match in matches],
1741 "completions": [SimpleCompletion(text=match, type=type) for match in matches],
1740 "suppress": (True if matches else False) if suppress_if_matches else False,
1742 "suppress": (True if matches else False) if suppress_if_matches else False,
1741 }
1743 }
1742 if fragment is not None:
1744 if fragment is not None:
1743 result["matched_fragment"] = fragment
1745 result["matched_fragment"] = fragment
1744 return cast(SimpleMatcherResult, result)
1746 return cast(SimpleMatcherResult, result)
1745
1747
1746
1748
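# Editor's illustrative sketch (added for clarity; not in the original source):
# wrapping a legacy list-of-strings result in the v2 ``SimpleMatcherResult``.
def _example_convert_v1_result():
    result = _convert_matcher_v1_result_to_v2(["foo", "food"], type="dict key")
    # result["completions"] is a list of SimpleCompletion objects and
    # result["suppress"] is False because suppress_if_matches was not requested.
    return result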
1747 class IPCompleter(Completer):
1749 class IPCompleter(Completer):
1748 """Extension of the completer class with IPython-specific features"""
1750 """Extension of the completer class with IPython-specific features"""
1749
1751
1750 @observe('greedy')
1752 @observe('greedy')
1751 def _greedy_changed(self, change):
1753 def _greedy_changed(self, change):
1752 """update the splitter and readline delims when greedy is changed"""
1754 """update the splitter and readline delims when greedy is changed"""
1753 if change["new"]:
1755 if change["new"]:
1754 self.evaluation = "unsafe"
1756 self.evaluation = "unsafe"
1755 self.auto_close_dict_keys = True
1757 self.auto_close_dict_keys = True
1756 self.splitter.delims = GREEDY_DELIMS
1758 self.splitter.delims = GREEDY_DELIMS
1757 else:
1759 else:
1758 self.evaluation = "limited"
1760 self.evaluation = "limited"
1759 self.auto_close_dict_keys = False
1761 self.auto_close_dict_keys = False
1760 self.splitter.delims = DELIMS
1762 self.splitter.delims = DELIMS
1761
1763
1762 dict_keys_only = Bool(
1764 dict_keys_only = Bool(
1763 False,
1765 False,
1764 help="""
1766 help="""
1765 Whether to show dict key matches only.
1767 Whether to show dict key matches only.
1766
1768
1767 (disables all matchers except for `IPCompleter.dict_key_matcher`).
1769 (disables all matchers except for `IPCompleter.dict_key_matcher`).
1768 """,
1770 """,
1769 )
1771 )
1770
1772
1771 suppress_competing_matchers = UnionTrait(
1773 suppress_competing_matchers = UnionTrait(
1772 [Bool(allow_none=True), DictTrait(Bool(None, allow_none=True))],
1774 [Bool(allow_none=True), DictTrait(Bool(None, allow_none=True))],
1773 default_value=None,
1775 default_value=None,
1774 help="""
1776 help="""
1775 Whether to suppress completions from other *Matchers*.
1777 Whether to suppress completions from other *Matchers*.
1776
1778
1777 When set to ``None`` (default) the matchers will attempt to auto-detect
1779 When set to ``None`` (default) the matchers will attempt to auto-detect
1778 whether suppression of other matchers is desirable. For example, at
1780 whether suppression of other matchers is desirable. For example, at
1779 the beginning of a line followed by `%` we expect a magic completion
1781 the beginning of a line followed by `%` we expect a magic completion
1780 to be the only applicable option, and after ``my_dict['`` we usually
1782 to be the only applicable option, and after ``my_dict['`` we usually
1781 expect a completion with an existing dictionary key.
1783 expect a completion with an existing dictionary key.
1782
1784
1783 If you want to disable this heuristic and see completions from all matchers,
1785 If you want to disable this heuristic and see completions from all matchers,
1784 set ``IPCompleter.suppress_competing_matchers = False``.
1786 set ``IPCompleter.suppress_competing_matchers = False``.
1785 To disable the heuristic for specific matchers provide a dictionary mapping:
1787 To disable the heuristic for specific matchers provide a dictionary mapping:
1786 ``IPCompleter.suppress_competing_matchers = {'IPCompleter.dict_key_matcher': False}``.
1788 ``IPCompleter.suppress_competing_matchers = {'IPCompleter.dict_key_matcher': False}``.
1787
1789
1788 Set ``IPCompleter.suppress_competing_matchers = True`` to limit
1790 Set ``IPCompleter.suppress_competing_matchers = True`` to limit
1789 completions to the set of matchers with the highest priority;
1791 completions to the set of matchers with the highest priority;
1790 this is equivalent to ``IPCompleter.merge_completions`` and
1792 this is equivalent to ``IPCompleter.merge_completions`` and
1791 can be beneficial for performance, but will sometimes omit relevant
1793 can be beneficial for performance, but will sometimes omit relevant
1792 candidates from matchers further down the priority list.
1794 candidates from matchers further down the priority list.
1793 """,
1795 """,
1794 ).tag(config=True)
1796 ).tag(config=True)
1795
1797
1796 merge_completions = Bool(
1798 merge_completions = Bool(
1797 True,
1799 True,
1798 help="""Whether to merge completion results into a single list
1800 help="""Whether to merge completion results into a single list
1799
1801
1800 If False, only the completion results from the first non-empty
1802 If False, only the completion results from the first non-empty
1801 completer will be returned.
1803 completer will be returned.
1802
1804
1803 As of version 8.6.0, setting the value to ``False`` is an alias for:
1805 As of version 8.6.0, setting the value to ``False`` is an alias for:
1804 ``IPCompleter.suppress_competing_matchers = True``.
1806 ``IPCompleter.suppress_competing_matchers = True``.
1805 """,
1807 """,
1806 ).tag(config=True)
1808 ).tag(config=True)
1807
1809
1808 disable_matchers = ListTrait(
1810 disable_matchers = ListTrait(
1809 Unicode(),
1811 Unicode(),
1810 help="""List of matchers to disable.
1812 help="""List of matchers to disable.
1811
1813
1812 The list should contain matcher identifiers (see :any:`completion_matcher`).
1814 The list should contain matcher identifiers (see :any:`completion_matcher`).
1813 """,
1815 """,
1814 ).tag(config=True)
1816 ).tag(config=True)
1815
1817
1816 omit__names = Enum(
1818 omit__names = Enum(
1817 (0, 1, 2),
1819 (0, 1, 2),
1818 default_value=2,
1820 default_value=2,
1819 help="""Instruct the completer to omit private method names
1821 help="""Instruct the completer to omit private method names
1820
1822
1821 Specifically, when completing on ``object.<tab>``.
1823 Specifically, when completing on ``object.<tab>``.
1822
1824
1823 When 2 [default]: all names that start with '_' will be excluded.
1825 When 2 [default]: all names that start with '_' will be excluded.
1824
1826
1825 When 1: all 'magic' names (``__foo__``) will be excluded.
1827 When 1: all 'magic' names (``__foo__``) will be excluded.
1826
1828
1827 When 0: nothing will be excluded.
1829 When 0: nothing will be excluded.
1828 """
1830 """
1829 ).tag(config=True)
1831 ).tag(config=True)
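# --- Illustrative configuration sketch (not part of the original source) ---
# The three values documented above map to the following (``ipython_config.py``):
#
#     c.IPCompleter.omit__names = 2   # hide every name starting with '_' (default)
#     c.IPCompleter.omit__names = 1   # hide only dunder names such as __init__
#     c.IPCompleter.omit__names = 0   # hide nothing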
1830 limit_to__all__ = Bool(False,
1832 limit_to__all__ = Bool(False,
1831 help="""
1833 help="""
1832 DEPRECATED as of version 5.0.
1834 DEPRECATED as of version 5.0.
1833
1835
1834 Instruct the completer to use __all__ for the completion
1836 Instruct the completer to use __all__ for the completion
1835
1837
1836 Specifically, when completing on ``object.<tab>``.
1838 Specifically, when completing on ``object.<tab>``.
1837
1839
1838 When True: only those names in obj.__all__ will be included.
1840 When True: only those names in obj.__all__ will be included.
1839
1841
1840 When False [default]: the __all__ attribute is ignored
1842 When False [default]: the __all__ attribute is ignored
1841 """,
1843 """,
1842 ).tag(config=True)
1844 ).tag(config=True)
1843
1845
1844 profile_completions = Bool(
1846 profile_completions = Bool(
1845 default_value=False,
1847 default_value=False,
1846 help="If True, emit profiling data for completion subsystem using cProfile."
1848 help="If True, emit profiling data for completion subsystem using cProfile."
1847 ).tag(config=True)
1849 ).tag(config=True)
1848
1850
1849 profiler_output_dir = Unicode(
1851 profiler_output_dir = Unicode(
1850 default_value=".completion_profiles",
1852 default_value=".completion_profiles",
1851 help="Template for path at which to output profile data for completions."
1853 help="Template for path at which to output profile data for completions."
1852 ).tag(config=True)
1854 ).tag(config=True)
1853
1855
1854 @observe('limit_to__all__')
1856 @observe('limit_to__all__')
1855 def _limit_to_all_changed(self, change):
1857 def _limit_to_all_changed(self, change):
1856 warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
1858 warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
1857 'value has been deprecated since IPython 5.0, will be made to have '
1859 'value has been deprecated since IPython 5.0, will be made to have '
1858 'no effect and then removed in a future version of IPython.',
1860 'no effect and then removed in a future version of IPython.',
1859 UserWarning)
1861 UserWarning)
1860
1862
1861 def __init__(
1863 def __init__(
1862 self, shell=None, namespace=None, global_namespace=None, config=None, **kwargs
1864 self, shell=None, namespace=None, global_namespace=None, config=None, **kwargs
1863 ):
1865 ):
1864 """IPCompleter() -> completer
1866 """IPCompleter() -> completer
1865
1867
1866 Return a completer object.
1868 Return a completer object.
1867
1869
1868 Parameters
1870 Parameters
1869 ----------
1871 ----------
1870 shell
1872 shell
1871 a pointer to the ipython shell itself. This is needed
1873 a pointer to the ipython shell itself. This is needed
1872 because this completer knows about magic functions, and those can
1874 because this completer knows about magic functions, and those can
1873 only be accessed via the ipython instance.
1875 only be accessed via the ipython instance.
1874 namespace : dict, optional
1876 namespace : dict, optional
1875 an optional dict where completions are performed.
1877 an optional dict where completions are performed.
1876 global_namespace : dict, optional
1878 global_namespace : dict, optional
1877 secondary optional dict for completions, to
1879 secondary optional dict for completions, to
1878 handle cases (such as IPython embedded inside functions) where
1880 handle cases (such as IPython embedded inside functions) where
1879 both Python scopes are visible.
1881 both Python scopes are visible.
1880 config : Config
1882 config : Config
1881 traitlet's config object
1883 traitlet's config object
1882 **kwargs
1884 **kwargs
1883 passed to super class unmodified.
1885 passed to super class unmodified.
1884 """
1886 """
1885
1887
1886 self.magic_escape = ESC_MAGIC
1888 self.magic_escape = ESC_MAGIC
1887 self.splitter = CompletionSplitter()
1889 self.splitter = CompletionSplitter()
1888
1890
1889 # _greedy_changed() depends on splitter and readline being defined:
1891 # _greedy_changed() depends on splitter and readline being defined:
1890 super().__init__(
1892 super().__init__(
1891 namespace=namespace,
1893 namespace=namespace,
1892 global_namespace=global_namespace,
1894 global_namespace=global_namespace,
1893 config=config,
1895 config=config,
1894 **kwargs,
1896 **kwargs,
1895 )
1897 )
1896
1898
1897 # List where completion matches will be stored
1899 # List where completion matches will be stored
1898 self.matches = []
1900 self.matches = []
1899 self.shell = shell
1901 self.shell = shell
1900 # Regexp to split filenames with spaces in them
1902 # Regexp to split filenames with spaces in them
1901 self.space_name_re = re.compile(r'([^\\] )')
1903 self.space_name_re = re.compile(r'([^\\] )')
1902 # Hold a local ref. to glob.glob for speed
1904 # Hold a local ref. to glob.glob for speed
1903 self.glob = glob.glob
1905 self.glob = glob.glob
1904
1906
1905 # Determine if we are running on 'dumb' terminals, like (X)Emacs
1907 # Determine if we are running on 'dumb' terminals, like (X)Emacs
1906 # buffers, to avoid completion problems.
1908 # buffers, to avoid completion problems.
1907 term = os.environ.get('TERM','xterm')
1909 term = os.environ.get('TERM','xterm')
1908 self.dumb_terminal = term in ['dumb','emacs']
1910 self.dumb_terminal = term in ['dumb','emacs']
1909
1911
1910 # Special handling of backslashes needed in win32 platforms
1912 # Special handling of backslashes needed in win32 platforms
1911 if sys.platform == "win32":
1913 if sys.platform == "win32":
1912 self.clean_glob = self._clean_glob_win32
1914 self.clean_glob = self._clean_glob_win32
1913 else:
1915 else:
1914 self.clean_glob = self._clean_glob
1916 self.clean_glob = self._clean_glob
1915
1917
1916 #regexp to parse docstring for function signature
1918 #regexp to parse docstring for function signature
1917 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1919 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1918 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1920 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1919 #use this if positional argument name is also needed
1921 #use this if positional argument name is also needed
1920 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
1922 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
1921
1923
1922 self.magic_arg_matchers = [
1924 self.magic_arg_matchers = [
1923 self.magic_config_matcher,
1925 self.magic_config_matcher,
1924 self.magic_color_matcher,
1926 self.magic_color_matcher,
1925 ]
1927 ]
1926
1928
1927 # This is set externally by InteractiveShell
1929 # This is set externally by InteractiveShell
1928 self.custom_completers = None
1930 self.custom_completers = None
1929
1931
1930 # This is a list of names of unicode characters that can be completed
1932 # This is a list of names of unicode characters that can be completed
1931 # into their corresponding unicode value. The list is large, so we
1933 # into their corresponding unicode value. The list is large, so we
1932 # lazily initialize it on first use. Consuming code should access this
1934 # lazily initialize it on first use. Consuming code should access this
1933 # attribute through the `unicode_names` property.
1935 # attribute through the `unicode_names` property.
1934 self._unicode_names = None
1936 self._unicode_names = None
1935
1937
1936 self._backslash_combining_matchers = [
1938 self._backslash_combining_matchers = [
1937 self.latex_name_matcher,
1939 self.latex_name_matcher,
1938 self.unicode_name_matcher,
1940 self.unicode_name_matcher,
1939 back_latex_name_matcher,
1941 back_latex_name_matcher,
1940 back_unicode_name_matcher,
1942 back_unicode_name_matcher,
1941 self.fwd_unicode_matcher,
1943 self.fwd_unicode_matcher,
1942 ]
1944 ]
1943
1945
1944 if not self.backslash_combining_completions:
1946 if not self.backslash_combining_completions:
1945 for matcher in self._backslash_combining_matchers:
1947 for matcher in self._backslash_combining_matchers:
1946 self.disable_matchers.append(_get_matcher_id(matcher))
1948 self.disable_matchers.append(_get_matcher_id(matcher))
1947
1949
1948 if not self.merge_completions:
1950 if not self.merge_completions:
1949 self.suppress_competing_matchers = True
1951 self.suppress_competing_matchers = True
1950
1952
1951 @property
1953 @property
1952 def matchers(self) -> List[Matcher]:
1954 def matchers(self) -> List[Matcher]:
1953 """All active matcher routines for completion"""
1955 """All active matcher routines for completion"""
1954 if self.dict_keys_only:
1956 if self.dict_keys_only:
1955 return [self.dict_key_matcher]
1957 return [self.dict_key_matcher]
1956
1958
1957 if self.use_jedi:
1959 if self.use_jedi:
1958 return [
1960 return [
1959 *self.custom_matchers,
1961 *self.custom_matchers,
1960 *self._backslash_combining_matchers,
1962 *self._backslash_combining_matchers,
1961 *self.magic_arg_matchers,
1963 *self.magic_arg_matchers,
1962 self.custom_completer_matcher,
1964 self.custom_completer_matcher,
1963 self.magic_matcher,
1965 self.magic_matcher,
1964 self._jedi_matcher,
1966 self._jedi_matcher,
1965 self.dict_key_matcher,
1967 self.dict_key_matcher,
1966 self.file_matcher,
1968 self.file_matcher,
1967 ]
1969 ]
1968 else:
1970 else:
1969 return [
1971 return [
1970 *self.custom_matchers,
1972 *self.custom_matchers,
1971 *self._backslash_combining_matchers,
1973 *self._backslash_combining_matchers,
1972 *self.magic_arg_matchers,
1974 *self.magic_arg_matchers,
1973 self.custom_completer_matcher,
1975 self.custom_completer_matcher,
1974 self.dict_key_matcher,
1976 self.dict_key_matcher,
1975 self.magic_matcher,
1977 self.magic_matcher,
1976 self.python_matcher,
1978 self.python_matcher,
1977 self.file_matcher,
1979 self.file_matcher,
1978 self.python_func_kw_matcher,
1980 self.python_func_kw_matcher,
1979 ]
1981 ]
1980
1982
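# --- Illustrative sketch (not part of the original source) ---
# Matchers registered at runtime land in ``self.custom_matchers`` and are
# therefore listed ahead of the built-in matchers above; e.g. from a running
# session (``my_matcher`` is a hypothetical callable decorated with
# ``@context_matcher()``):
#
#     >>> ip = get_ipython()
#     >>> ip.Completer.custom_matchers.append(my_matcher)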
1981 def all_completions(self, text:str) -> List[str]:
1983 def all_completions(self, text:str) -> List[str]:
1982 """
1984 """
1983 Wrapper around the completion methods for the benefit of emacs.
1985 Wrapper around the completion methods for the benefit of emacs.
1984 """
1986 """
1985 prefix = text.rpartition('.')[0]
1987 prefix = text.rpartition('.')[0]
1986 with provisionalcompleter():
1988 with provisionalcompleter():
1987 return ['.'.join([prefix, c.text]) if prefix and self.use_jedi else c.text
1989 return ['.'.join([prefix, c.text]) if prefix and self.use_jedi else c.text
1988 for c in self.completions(text, len(text))]
1990 for c in self.completions(text, len(text))]
1989
1991
1990 return self.complete(text)[1]
1992 return self.complete(text)[1]
1991
1993
1992 def _clean_glob(self, text:str):
1994 def _clean_glob(self, text:str):
1993 return self.glob("%s*" % text)
1995 return self.glob("%s*" % text)
1994
1996
1995 def _clean_glob_win32(self, text:str):
1997 def _clean_glob_win32(self, text:str):
1996 return [f.replace("\\","/")
1998 return [f.replace("\\","/")
1997 for f in self.glob("%s*" % text)]
1999 for f in self.glob("%s*" % text)]
1998
2000
1999 @context_matcher()
2001 @context_matcher()
2000 def file_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2002 def file_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2001 """Same as :any:`file_matches`, but adapted to the new Matcher API."""
2003 """Same as :any:`file_matches`, but adapted to the new Matcher API."""
2002 matches = self.file_matches(context.token)
2004 matches = self.file_matches(context.token)
2003 # TODO: add a heuristic for suppressing (e.g. if it has OS-specific delimiter,
2005 # TODO: add a heuristic for suppressing (e.g. if it has OS-specific delimiter,
2004 # starts with `/home/`, `C:\`, etc)
2006 # starts with `/home/`, `C:\`, etc)
2005 return _convert_matcher_v1_result_to_v2(matches, type="path")
2007 return _convert_matcher_v1_result_to_v2(matches, type="path")
2006
2008
2007 def file_matches(self, text: str) -> List[str]:
2009 def file_matches(self, text: str) -> List[str]:
2008 """Match filenames, expanding ~USER type strings.
2010 """Match filenames, expanding ~USER type strings.
2009
2011
2010 Most of the seemingly convoluted logic in this completer is an
2012 Most of the seemingly convoluted logic in this completer is an
2011 attempt to handle filenames with spaces in them. And yet it's not
2013 attempt to handle filenames with spaces in them. And yet it's not
2012 quite perfect, because Python's readline doesn't expose all of the
2014 quite perfect, because Python's readline doesn't expose all of the
2013 GNU readline details needed for this to be done correctly.
2015 GNU readline details needed for this to be done correctly.
2014
2016
2015 For a filename with a space in it, the printed completions will be
2017 For a filename with a space in it, the printed completions will be
2016 only the parts after what's already been typed (instead of the
2018 only the parts after what's already been typed (instead of the
2017 full completions, as is normally done). I don't think it's possible to do
2019 full completions, as is normally done). I don't think it's possible to do
2018 better with the current (as of Python 2.3) Python readline.
2020 better with the current (as of Python 2.3) Python readline.
2020
2022
2021 .. deprecated:: 8.6
2023 .. deprecated:: 8.6
2022 You can use :meth:`file_matcher` instead.
2024 You can use :meth:`file_matcher` instead.
2023 """
2025 """
2024
2026
2025 # chars that require escaping with backslash - i.e. chars
2027 # chars that require escaping with backslash - i.e. chars
2026 # that readline treats incorrectly as delimiters, but we
2028 # that readline treats incorrectly as delimiters, but we
2027 # don't want to treat as delimiters in filename matching
2029 # don't want to treat as delimiters in filename matching
2028 # when escaped with backslash
2030 # when escaped with backslash
2029 if text.startswith('!'):
2031 if text.startswith('!'):
2030 text = text[1:]
2032 text = text[1:]
2031 text_prefix = u'!'
2033 text_prefix = u'!'
2032 else:
2034 else:
2033 text_prefix = u''
2035 text_prefix = u''
2034
2036
2035 text_until_cursor = self.text_until_cursor
2037 text_until_cursor = self.text_until_cursor
2036 # track strings with open quotes
2038 # track strings with open quotes
2037 open_quotes = has_open_quotes(text_until_cursor)
2039 open_quotes = has_open_quotes(text_until_cursor)
2038
2040
2039 if '(' in text_until_cursor or '[' in text_until_cursor:
2041 if '(' in text_until_cursor or '[' in text_until_cursor:
2040 lsplit = text
2042 lsplit = text
2041 else:
2043 else:
2042 try:
2044 try:
2043 # arg_split ~ shlex.split, but with unicode bugs fixed by us
2045 # arg_split ~ shlex.split, but with unicode bugs fixed by us
2044 lsplit = arg_split(text_until_cursor)[-1]
2046 lsplit = arg_split(text_until_cursor)[-1]
2045 except ValueError:
2047 except ValueError:
2046 # typically an unmatched ", or backslash without escaped char.
2048 # typically an unmatched ", or backslash without escaped char.
2047 if open_quotes:
2049 if open_quotes:
2048 lsplit = text_until_cursor.split(open_quotes)[-1]
2050 lsplit = text_until_cursor.split(open_quotes)[-1]
2049 else:
2051 else:
2050 return []
2052 return []
2051 except IndexError:
2053 except IndexError:
2052 # tab pressed on empty line
2054 # tab pressed on empty line
2053 lsplit = ""
2055 lsplit = ""
2054
2056
2055 if not open_quotes and lsplit != protect_filename(lsplit):
2057 if not open_quotes and lsplit != protect_filename(lsplit):
2056 # if protectables are found, do matching on the whole escaped name
2058 # if protectables are found, do matching on the whole escaped name
2057 has_protectables = True
2059 has_protectables = True
2058 text0,text = text,lsplit
2060 text0,text = text,lsplit
2059 else:
2061 else:
2060 has_protectables = False
2062 has_protectables = False
2061 text = os.path.expanduser(text)
2063 text = os.path.expanduser(text)
2062
2064
2063 if text == "":
2065 if text == "":
2064 return [text_prefix + protect_filename(f) for f in self.glob("*")]
2066 return [text_prefix + protect_filename(f) for f in self.glob("*")]
2065
2067
2066 # Compute the matches from the filesystem
2068 # Compute the matches from the filesystem
2067 if sys.platform == 'win32':
2069 if sys.platform == 'win32':
2068 m0 = self.clean_glob(text)
2070 m0 = self.clean_glob(text)
2069 else:
2071 else:
2070 m0 = self.clean_glob(text.replace('\\', ''))
2072 m0 = self.clean_glob(text.replace('\\', ''))
2071
2073
2072 if has_protectables:
2074 if has_protectables:
2073 # If we had protectables, we need to revert our changes to the
2075 # If we had protectables, we need to revert our changes to the
2074 # beginning of filename so that we don't double-write the part
2076 # beginning of filename so that we don't double-write the part
2075 # of the filename we have so far
2077 # of the filename we have so far
2076 len_lsplit = len(lsplit)
2078 len_lsplit = len(lsplit)
2077 matches = [text_prefix + text0 +
2079 matches = [text_prefix + text0 +
2078 protect_filename(f[len_lsplit:]) for f in m0]
2080 protect_filename(f[len_lsplit:]) for f in m0]
2079 else:
2081 else:
2080 if open_quotes:
2082 if open_quotes:
2081 # if we have a string with an open quote, we don't need to
2083 # if we have a string with an open quote, we don't need to
2082 # protect the names beyond the quote (and we _shouldn't_, as
2084 # protect the names beyond the quote (and we _shouldn't_, as
2083 # it would cause bugs when the filesystem call is made).
2085 # it would cause bugs when the filesystem call is made).
2084 matches = m0 if sys.platform == "win32" else\
2086 matches = m0 if sys.platform == "win32" else\
2085 [protect_filename(f, open_quotes) for f in m0]
2087 [protect_filename(f, open_quotes) for f in m0]
2086 else:
2088 else:
2087 matches = [text_prefix +
2089 matches = [text_prefix +
2088 protect_filename(f) for f in m0]
2090 protect_filename(f) for f in m0]
2089
2091
2090 # Mark directories in input list by appending '/' to their names.
2092 # Mark directories in input list by appending '/' to their names.
2091 return [x+'/' if os.path.isdir(x) else x for x in matches]
2093 return [x+'/' if os.path.isdir(x) else x for x in matches]
2092
2094
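# --- Illustrative example (not part of the original source) ---
# Sketch of the behaviour above: assuming the working directory contains a
# file ``notes.txt`` and a directory ``notebooks``, completing on ``note``
# would offer both, with a trailing '/' marking the directory:
#
#     note<tab>   ->   notes.txt    notebooks/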
2093 @context_matcher()
2095 @context_matcher()
2094 def magic_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2096 def magic_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2095 """Match magics."""
2097 """Match magics."""
2096 text = context.token
2098 text = context.token
2097 matches = self.magic_matches(text)
2099 matches = self.magic_matches(text)
2098 result = _convert_matcher_v1_result_to_v2(matches, type="magic")
2100 result = _convert_matcher_v1_result_to_v2(matches, type="magic")
2099 is_magic_prefix = len(text) > 0 and text[0] == "%"
2101 is_magic_prefix = len(text) > 0 and text[0] == "%"
2100 result["suppress"] = is_magic_prefix and bool(result["completions"])
2102 result["suppress"] = is_magic_prefix and bool(result["completions"])
2101 return result
2103 return result
2102
2104
2103 def magic_matches(self, text: str):
2105 def magic_matches(self, text: str):
2104 """Match magics.
2106 """Match magics.
2105
2107
2106 .. deprecated:: 8.6
2108 .. deprecated:: 8.6
2107 You can use :meth:`magic_matcher` instead.
2109 You can use :meth:`magic_matcher` instead.
2108 """
2110 """
2109 # Get all shell magics now rather than statically, so magics loaded at
2111 # Get all shell magics now rather than statically, so magics loaded at
2110 # runtime show up too.
2112 # runtime show up too.
2111 lsm = self.shell.magics_manager.lsmagic()
2113 lsm = self.shell.magics_manager.lsmagic()
2112 line_magics = lsm['line']
2114 line_magics = lsm['line']
2113 cell_magics = lsm['cell']
2115 cell_magics = lsm['cell']
2114 pre = self.magic_escape
2116 pre = self.magic_escape
2115 pre2 = pre+pre
2117 pre2 = pre+pre
2116
2118
2117 explicit_magic = text.startswith(pre)
2119 explicit_magic = text.startswith(pre)
2118
2120
2119 # Completion logic:
2121 # Completion logic:
2120 # - user gives %%: only do cell magics
2122 # - user gives %%: only do cell magics
2121 # - user gives %: do both line and cell magics
2123 # - user gives %: do both line and cell magics
2122 # - no prefix: do both
2124 # - no prefix: do both
2123 # In other words, line magics are skipped if the user gives %% explicitly
2125 # In other words, line magics are skipped if the user gives %% explicitly
2124 #
2126 #
2125 # We also exclude magics that match any currently visible names:
2127 # We also exclude magics that match any currently visible names:
2126 # https://github.com/ipython/ipython/issues/4877, unless the user has
2128 # https://github.com/ipython/ipython/issues/4877, unless the user has
2127 # typed a %:
2129 # typed a %:
2128 # https://github.com/ipython/ipython/issues/10754
2130 # https://github.com/ipython/ipython/issues/10754
2129 bare_text = text.lstrip(pre)
2131 bare_text = text.lstrip(pre)
2130 global_matches = self.global_matches(bare_text)
2132 global_matches = self.global_matches(bare_text)
2131 if not explicit_magic:
2133 if not explicit_magic:
2132 def matches(magic):
2134 def matches(magic):
2133 """
2135 """
2134 Filter magics, in particular remove magics that match
2136 Filter magics, in particular remove magics that match
2135 a name present in global namespace.
2137 a name present in global namespace.
2136 """
2138 """
2137 return ( magic.startswith(bare_text) and
2139 return ( magic.startswith(bare_text) and
2138 magic not in global_matches )
2140 magic not in global_matches )
2139 else:
2141 else:
2140 def matches(magic):
2142 def matches(magic):
2141 return magic.startswith(bare_text)
2143 return magic.startswith(bare_text)
2142
2144
2143 comp = [ pre2+m for m in cell_magics if matches(m)]
2145 comp = [ pre2+m for m in cell_magics if matches(m)]
2144 if not text.startswith(pre2):
2146 if not text.startswith(pre2):
2145 comp += [ pre+m for m in line_magics if matches(m)]
2147 comp += [ pre+m for m in line_magics if matches(m)]
2146
2148
2147 return comp
2149 return comp
2148
2150
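# --- Illustrative example (not part of the original source) ---
# Sketch of the prefix rules above (actual candidates depend on the magics
# that are loaded): ``%ti`` offers cell magics first, then line magics, while
# ``%%ti`` is restricted to cell magics:
#
#     %ti<tab>    ->   %%time  %%timeit  %time  %timeit
#     %%ti<tab>   ->   %%time  %%timeit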
2149 @context_matcher()
2151 @context_matcher()
2150 def magic_config_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2152 def magic_config_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2151 """Match class names and attributes for %config magic."""
2153 """Match class names and attributes for %config magic."""
2152 # NOTE: uses `line_buffer` equivalent for compatibility
2154 # NOTE: uses `line_buffer` equivalent for compatibility
2153 matches = self.magic_config_matches(context.line_with_cursor)
2155 matches = self.magic_config_matches(context.line_with_cursor)
2154 return _convert_matcher_v1_result_to_v2(matches, type="param")
2156 return _convert_matcher_v1_result_to_v2(matches, type="param")
2155
2157
2156 def magic_config_matches(self, text: str) -> List[str]:
2158 def magic_config_matches(self, text: str) -> List[str]:
2157 """Match class names and attributes for %config magic.
2159 """Match class names and attributes for %config magic.
2158
2160
2159 .. deprecated:: 8.6
2161 .. deprecated:: 8.6
2160 You can use :meth:`magic_config_matcher` instead.
2162 You can use :meth:`magic_config_matcher` instead.
2161 """
2163 """
2162 texts = text.strip().split()
2164 texts = text.strip().split()
2163
2165
2164 if len(texts) > 0 and (texts[0] == 'config' or texts[0] == '%config'):
2166 if len(texts) > 0 and (texts[0] == 'config' or texts[0] == '%config'):
2165 # get all configuration classes
2167 # get all configuration classes
2166 classes = sorted(set([ c for c in self.shell.configurables
2168 classes = sorted(set([ c for c in self.shell.configurables
2167 if c.__class__.class_traits(config=True)
2169 if c.__class__.class_traits(config=True)
2168 ]), key=lambda x: x.__class__.__name__)
2170 ]), key=lambda x: x.__class__.__name__)
2169 classnames = [ c.__class__.__name__ for c in classes ]
2171 classnames = [ c.__class__.__name__ for c in classes ]
2170
2172
2171 # return all classnames if config or %config is given
2173 # return all classnames if config or %config is given
2172 if len(texts) == 1:
2174 if len(texts) == 1:
2173 return classnames
2175 return classnames
2174
2176
2175 # match classname
2177 # match classname
2176 classname_texts = texts[1].split('.')
2178 classname_texts = texts[1].split('.')
2177 classname = classname_texts[0]
2179 classname = classname_texts[0]
2178 classname_matches = [ c for c in classnames
2180 classname_matches = [ c for c in classnames
2179 if c.startswith(classname) ]
2181 if c.startswith(classname) ]
2180
2182
2181 # return matched classes or the matched class with attributes
2183 # return matched classes or the matched class with attributes
2182 if texts[1].find('.') < 0:
2184 if texts[1].find('.') < 0:
2183 return classname_matches
2185 return classname_matches
2184 elif len(classname_matches) == 1 and \
2186 elif len(classname_matches) == 1 and \
2185 classname_matches[0] == classname:
2187 classname_matches[0] == classname:
2186 cls = classes[classnames.index(classname)].__class__
2188 cls = classes[classnames.index(classname)].__class__
2187 help = cls.class_get_help()
2189 help = cls.class_get_help()
2188 # strip leading '--' from cl-args:
2190 # strip leading '--' from cl-args:
2189 help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
2191 help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
2190 return [ attr.split('=')[0]
2192 return [ attr.split('=')[0]
2191 for attr in help.strip().splitlines()
2193 for attr in help.strip().splitlines()
2192 if attr.startswith(texts[1]) ]
2194 if attr.startswith(texts[1]) ]
2193 return []
2195 return []
2194
2196
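# --- Illustrative example (not part of the original source) ---
# Sketch of the two-stage matching above: the first word after ``%config``
# completes to configurable class names, and ``Class.<prefix>`` completes to
# that class's configurable traits (output abridged):
#
#     %config IPC<tab>              ->   IPCompleter
#     %config IPCompleter.use<tab>  ->   IPCompleter.use_jedi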
2195 @context_matcher()
2197 @context_matcher()
2196 def magic_color_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2198 def magic_color_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2197 """Match color schemes for %colors magic."""
2199 """Match color schemes for %colors magic."""
2198 # NOTE: uses `line_buffer` equivalent for compatibility
2200 # NOTE: uses `line_buffer` equivalent for compatibility
2199 matches = self.magic_color_matches(context.line_with_cursor)
2201 matches = self.magic_color_matches(context.line_with_cursor)
2200 return _convert_matcher_v1_result_to_v2(matches, type="param")
2202 return _convert_matcher_v1_result_to_v2(matches, type="param")
2201
2203
2202 def magic_color_matches(self, text: str) -> List[str]:
2204 def magic_color_matches(self, text: str) -> List[str]:
2203 """Match color schemes for %colors magic.
2205 """Match color schemes for %colors magic.
2204
2206
2205 .. deprecated:: 8.6
2207 .. deprecated:: 8.6
2206 You can use :meth:`magic_color_matcher` instead.
2208 You can use :meth:`magic_color_matcher` instead.
2207 """
2209 """
2208 texts = text.split()
2210 texts = text.split()
2209 if text.endswith(' '):
2211 if text.endswith(' '):
2210 # .split() strips off the trailing whitespace. Add '' back
2212 # .split() strips off the trailing whitespace. Add '' back
2211 # so that: '%colors ' -> ['%colors', '']
2213 # so that: '%colors ' -> ['%colors', '']
2212 texts.append('')
2214 texts.append('')
2213
2215
2214 if len(texts) == 2 and (texts[0] == 'colors' or texts[0] == '%colors'):
2216 if len(texts) == 2 and (texts[0] == 'colors' or texts[0] == '%colors'):
2215 prefix = texts[1]
2217 prefix = texts[1]
2216 return [ color for color in InspectColors.keys()
2218 return [ color for color in InspectColors.keys()
2217 if color.startswith(prefix) ]
2219 if color.startswith(prefix) ]
2218 return []
2220 return []
2219
2221
2220 @context_matcher(identifier="IPCompleter.jedi_matcher")
2222 @context_matcher(identifier="IPCompleter.jedi_matcher")
2221 def _jedi_matcher(self, context: CompletionContext) -> _JediMatcherResult:
2223 def _jedi_matcher(self, context: CompletionContext) -> _JediMatcherResult:
2222 matches = self._jedi_matches(
2224 matches = self._jedi_matches(
2223 cursor_column=context.cursor_position,
2225 cursor_column=context.cursor_position,
2224 cursor_line=context.cursor_line,
2226 cursor_line=context.cursor_line,
2225 text=context.full_text,
2227 text=context.full_text,
2226 )
2228 )
2227 return {
2229 return {
2228 "completions": matches,
2230 "completions": matches,
2229 # static analysis should not suppress other matchers
2231 # static analysis should not suppress other matchers
2230 "suppress": False,
2232 "suppress": False,
2231 }
2233 }
2232
2234
2233 def _jedi_matches(
2235 def _jedi_matches(
2234 self, cursor_column: int, cursor_line: int, text: str
2236 self, cursor_column: int, cursor_line: int, text: str
2235 ) -> Iterator[_JediCompletionLike]:
2237 ) -> Iterator[_JediCompletionLike]:
2236 """
2238 """
2237 Return a list of :any:`jedi.api.Completion` objects from a ``text`` and
2239 Return a list of :any:`jedi.api.Completion` objects from a ``text`` and
2238 cursor position.
2240 cursor position.
2239
2241
2240 Parameters
2242 Parameters
2241 ----------
2243 ----------
2242 cursor_column : int
2244 cursor_column : int
2243 column position of the cursor in ``text``, 0-indexed.
2245 column position of the cursor in ``text``, 0-indexed.
2244 cursor_line : int
2246 cursor_line : int
2245 line position of the cursor in ``text``, 0-indexed
2247 line position of the cursor in ``text``, 0-indexed
2246 text : str
2248 text : str
2247 text to complete
2249 text to complete
2248
2250
2249 Notes
2251 Notes
2250 -----
2252 -----
2251 If ``IPCompleter.debug`` is ``True`` may return a :any:`_FakeJediCompletion`
2253 If ``IPCompleter.debug`` is ``True`` may return a :any:`_FakeJediCompletion`
2252 object containing a string with the Jedi debug information attached.
2254 object containing a string with the Jedi debug information attached.
2253
2255
2254 .. deprecated:: 8.6
2256 .. deprecated:: 8.6
2255 You can use :meth:`_jedi_matcher` instead.
2257 You can use :meth:`_jedi_matcher` instead.
2256 """
2258 """
2257 namespaces = [self.namespace]
2259 namespaces = [self.namespace]
2258 if self.global_namespace is not None:
2260 if self.global_namespace is not None:
2259 namespaces.append(self.global_namespace)
2261 namespaces.append(self.global_namespace)
2260
2262
2261 completion_filter = lambda x:x
2263 completion_filter = lambda x:x
2262 offset = cursor_to_position(text, cursor_line, cursor_column)
2264 offset = cursor_to_position(text, cursor_line, cursor_column)
2263 # filter output if we are completing for object members
2265 # filter output if we are completing for object members
2264 if offset:
2266 if offset:
2265 pre = text[offset-1]
2267 pre = text[offset-1]
2266 if pre == '.':
2268 if pre == '.':
2267 if self.omit__names == 2:
2269 if self.omit__names == 2:
2268 completion_filter = lambda c:not c.name.startswith('_')
2270 completion_filter = lambda c:not c.name.startswith('_')
2269 elif self.omit__names == 1:
2271 elif self.omit__names == 1:
2270 completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
2272 completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
2271 elif self.omit__names == 0:
2273 elif self.omit__names == 0:
2272 completion_filter = lambda x:x
2274 completion_filter = lambda x:x
2273 else:
2275 else:
2274 raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
2276 raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
2275
2277
2276 interpreter = jedi.Interpreter(text[:offset], namespaces)
2278 interpreter = jedi.Interpreter(text[:offset], namespaces)
2277 try_jedi = True
2279 try_jedi = True
2278
2280
2279 try:
2281 try:
2280 # find the first token in the current tree -- if it is a ' or " then we are in a string
2282 # find the first token in the current tree -- if it is a ' or " then we are in a string
2281 completing_string = False
2283 completing_string = False
2282 try:
2284 try:
2283 first_child = next(c for c in interpreter._get_module().tree_node.children if hasattr(c, 'value'))
2285 first_child = next(c for c in interpreter._get_module().tree_node.children if hasattr(c, 'value'))
2284 except StopIteration:
2286 except StopIteration:
2285 pass
2287 pass
2286 else:
2288 else:
2287 # note the value may be ', ", or it may also be ''' or """, or
2289 # note the value may be ', ", or it may also be ''' or """, or
2288 # in some cases, """what/you/typed..., but all of these are
2290 # in some cases, """what/you/typed..., but all of these are
2289 # strings.
2291 # strings.
2290 completing_string = len(first_child.value) > 0 and first_child.value[0] in {"'", '"'}
2292 completing_string = len(first_child.value) > 0 and first_child.value[0] in {"'", '"'}
2291
2293
2292 # if we are in a string jedi is likely not the right candidate for
2294 # if we are in a string jedi is likely not the right candidate for
2293 # now. Skip it.
2295 # now. Skip it.
2294 try_jedi = not completing_string
2296 try_jedi = not completing_string
2295 except Exception as e:
2297 except Exception as e:
2296 # many things can go wrong; we are using a private API, just don't crash.
2298 # many things can go wrong; we are using a private API, just don't crash.
2297 if self.debug:
2299 if self.debug:
2298 print("Error detecting if completing a non-finished string :", e, '|')
2300 print("Error detecting if completing a non-finished string :", e, '|')
2299
2301
2300 if not try_jedi:
2302 if not try_jedi:
2301 return iter([])
2303 return iter([])
2302 try:
2304 try:
2303 return filter(completion_filter, interpreter.complete(column=cursor_column, line=cursor_line + 1))
2305 return filter(completion_filter, interpreter.complete(column=cursor_column, line=cursor_line + 1))
2304 except Exception as e:
2306 except Exception as e:
2305 if self.debug:
2307 if self.debug:
2306 return iter(
2308 return iter(
2307 [
2309 [
2308 _FakeJediCompletion(
2310 _FakeJediCompletion(
2309 'Oops Jedi has crashed, please report a bug with the following:\n"""\n%s\n"""'
2311 'Oops Jedi has crashed, please report a bug with the following:\n"""\n%s\n"""'
2310 % (e)
2312 % (e)
2311 )
2313 )
2312 ]
2314 ]
2313 )
2315 )
2314 else:
2316 else:
2315 return iter([])
2317 return iter([])
2316
2318
2317 @context_matcher()
2319 @context_matcher()
2318 def python_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2320 def python_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2319 """Match attributes or global python names"""
2321 """Match attributes or global python names"""
2320 text = context.line_with_cursor
2322 text = context.line_with_cursor
2321 if "." in text:
2323 if "." in text:
2322 try:
2324 try:
2323 matches, fragment = self._attr_matches(text, include_prefix=False)
2325 matches, fragment = self._attr_matches(text, include_prefix=False)
2324 if text.endswith(".") and self.omit__names:
2326 if text.endswith(".") and self.omit__names:
2325 if self.omit__names == 1:
2327 if self.omit__names == 1:
2326 # true if txt is _not_ a __ name, false otherwise:
2328 # true if txt is _not_ a __ name, false otherwise:
2327 no__name = lambda txt: re.match(r".*\.__.*?__", txt) is None
2329 no__name = lambda txt: re.match(r".*\.__.*?__", txt) is None
2328 else:
2330 else:
2329 # true if txt is _not_ a _ name, false otherwise:
2331 # true if txt is _not_ a _ name, false otherwise:
2330 no__name = (
2332 no__name = (
2331 lambda txt: re.match(r"\._.*?", txt[txt.rindex(".") :])
2333 lambda txt: re.match(r"\._.*?", txt[txt.rindex(".") :])
2332 is None
2334 is None
2333 )
2335 )
2334 matches = filter(no__name, matches)
2336 matches = filter(no__name, matches)
2335 return _convert_matcher_v1_result_to_v2(
2337 return _convert_matcher_v1_result_to_v2(
2336 matches, type="attribute", fragment=fragment
2338 matches, type="attribute", fragment=fragment
2337 )
2339 )
2338 except NameError:
2340 except NameError:
2339 # catches <undefined attributes>.<tab>
2341 # catches <undefined attributes>.<tab>
2340 matches = []
2342 matches = []
2341 return _convert_matcher_v1_result_to_v2(matches, type="attribute")
2343 return _convert_matcher_v1_result_to_v2(matches, type="attribute")
2342 else:
2344 else:
2343 matches = self.global_matches(context.token)
2345 matches = self.global_matches(context.token)
2344 # TODO: maybe distinguish between functions, modules and just "variables"
2346 # TODO: maybe distinguish between functions, modules and just "variables"
2345 return _convert_matcher_v1_result_to_v2(matches, type="variable")
2347 return _convert_matcher_v1_result_to_v2(matches, type="variable")
2346
2348
2347 @completion_matcher(api_version=1)
2349 @completion_matcher(api_version=1)
2348 def python_matches(self, text: str) -> Iterable[str]:
2350 def python_matches(self, text: str) -> Iterable[str]:
2349 """Match attributes or global python names.
2351 """Match attributes or global python names.
2350
2352
2351 .. deprecated:: 8.27
2353 .. deprecated:: 8.27
2352 You can use :meth:`python_matcher` instead."""
2354 You can use :meth:`python_matcher` instead."""
2353 if "." in text:
2355 if "." in text:
2354 try:
2356 try:
2355 matches = self.attr_matches(text)
2357 matches = self.attr_matches(text)
2356 if text.endswith('.') and self.omit__names:
2358 if text.endswith('.') and self.omit__names:
2357 if self.omit__names == 1:
2359 if self.omit__names == 1:
2358 # true if txt is _not_ a __ name, false otherwise:
2360 # true if txt is _not_ a __ name, false otherwise:
2359 no__name = (lambda txt:
2361 no__name = (lambda txt:
2360 re.match(r'.*\.__.*?__',txt) is None)
2362 re.match(r'.*\.__.*?__',txt) is None)
2361 else:
2363 else:
2362 # true if txt is _not_ a _ name, false otherwise:
2364 # true if txt is _not_ a _ name, false otherwise:
2363 no__name = (lambda txt:
2365 no__name = (lambda txt:
2364 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
2366 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
2365 matches = filter(no__name, matches)
2367 matches = filter(no__name, matches)
2366 except NameError:
2368 except NameError:
2367 # catches <undefined attributes>.<tab>
2369 # catches <undefined attributes>.<tab>
2368 matches = []
2370 matches = []
2369 else:
2371 else:
2370 matches = self.global_matches(text)
2372 matches = self.global_matches(text)
2371 return matches
2373 return matches
2372
2374
2373 def _default_arguments_from_docstring(self, doc):
2375 def _default_arguments_from_docstring(self, doc):
2374 """Parse the first line of docstring for call signature.
2376 """Parse the first line of docstring for call signature.
2375
2377
2376 Docstring should be of the form 'min(iterable[, key=func])\n'.
2378 Docstring should be of the form 'min(iterable[, key=func])\n'.
2377 It can also parse Cython docstrings of the form
2379 It can also parse Cython docstrings of the form
2378 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
2380 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
2379 """
2381 """
2380 if doc is None:
2382 if doc is None:
2381 return []
2383 return []
2382
2384
2383 # care only about the first line
2385 # care only about the first line
2384 line = doc.lstrip().splitlines()[0]
2386 line = doc.lstrip().splitlines()[0]
2385
2387
2386 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
2388 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
2387 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
2389 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
2388 sig = self.docstring_sig_re.search(line)
2390 sig = self.docstring_sig_re.search(line)
2389 if sig is None:
2391 if sig is None:
2390 return []
2392 return []
2391 # 'iterable[, key=func]' -> ['iterable[', ' key=func]']
2393 # 'iterable[, key=func]' -> ['iterable[', ' key=func]']
2392 sig = sig.groups()[0].split(',')
2394 sig = sig.groups()[0].split(',')
2393 ret = []
2395 ret = []
2394 for s in sig:
2396 for s in sig:
2395 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
2397 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
2396 ret += self.docstring_kwd_re.findall(s)
2398 ret += self.docstring_kwd_re.findall(s)
2397 return ret
2399 return ret
2398
2400
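# --- Illustrative example (not part of the original source) ---
# Sketch of the parsing above with the default regexes: from the signature
# line quoted in the docstring, only the keyword-style entry survives the
# second pass:
#
#     'min(iterable[, key=func])\n'   ->   ['key']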
2399 def _default_arguments(self, obj):
2401 def _default_arguments(self, obj):
2400 """Return the list of default arguments of obj if it is callable,
2402 """Return the list of default arguments of obj if it is callable,
2401 or empty list otherwise."""
2403 or empty list otherwise."""
2402 call_obj = obj
2404 call_obj = obj
2403 ret = []
2405 ret = []
2404 if inspect.isbuiltin(obj):
2406 if inspect.isbuiltin(obj):
2405 pass
2407 pass
2406 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
2408 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
2407 if inspect.isclass(obj):
2409 if inspect.isclass(obj):
2408 #for cython embedsignature=True the constructor docstring
2410 #for cython embedsignature=True the constructor docstring
2409 #belongs to the object itself not __init__
2411 #belongs to the object itself not __init__
2410 ret += self._default_arguments_from_docstring(
2412 ret += self._default_arguments_from_docstring(
2411 getattr(obj, '__doc__', ''))
2413 getattr(obj, '__doc__', ''))
2412 # for classes, check for __init__,__new__
2414 # for classes, check for __init__,__new__
2413 call_obj = (getattr(obj, '__init__', None) or
2415 call_obj = (getattr(obj, '__init__', None) or
2414 getattr(obj, '__new__', None))
2416 getattr(obj, '__new__', None))
2415 # for all others, check if they are __call__able
2417 # for all others, check if they are __call__able
2416 elif hasattr(obj, '__call__'):
2418 elif hasattr(obj, '__call__'):
2417 call_obj = obj.__call__
2419 call_obj = obj.__call__
2418 ret += self._default_arguments_from_docstring(
2420 ret += self._default_arguments_from_docstring(
2419 getattr(call_obj, '__doc__', ''))
2421 getattr(call_obj, '__doc__', ''))
2420
2422
2421 _keeps = (inspect.Parameter.KEYWORD_ONLY,
2423 _keeps = (inspect.Parameter.KEYWORD_ONLY,
2422 inspect.Parameter.POSITIONAL_OR_KEYWORD)
2424 inspect.Parameter.POSITIONAL_OR_KEYWORD)
2423
2425
2424 try:
2426 try:
2425 sig = inspect.signature(obj)
2427 sig = inspect.signature(obj)
2426 ret.extend(k for k, v in sig.parameters.items() if
2428 ret.extend(k for k, v in sig.parameters.items() if
2427 v.kind in _keeps)
2429 v.kind in _keeps)
2428 except ValueError:
2430 except ValueError:
2429 pass
2431 pass
2430
2432
2431 return list(set(ret))
2433 return list(set(ret))
2432
2434
2433 @context_matcher()
2435 @context_matcher()
2434 def python_func_kw_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2436 def python_func_kw_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2435 """Match named parameters (kwargs) of the last open function."""
2437 """Match named parameters (kwargs) of the last open function."""
2436 matches = self.python_func_kw_matches(context.token)
2438 matches = self.python_func_kw_matches(context.token)
2437 return _convert_matcher_v1_result_to_v2(matches, type="param")
2439 return _convert_matcher_v1_result_to_v2(matches, type="param")
2438
2440
2439 def python_func_kw_matches(self, text):
2441 def python_func_kw_matches(self, text):
2440 """Match named parameters (kwargs) of the last open function.
2442 """Match named parameters (kwargs) of the last open function.
2441
2443
2442 .. deprecated:: 8.6
2444 .. deprecated:: 8.6
2443 You can use :meth:`python_func_kw_matcher` instead.
2445 You can use :meth:`python_func_kw_matcher` instead.
2444 """
2446 """
2445
2447
2446 if "." in text: # a parameter cannot be dotted
2448 if "." in text: # a parameter cannot be dotted
2447 return []
2449 return []
2448 try: regexp = self.__funcParamsRegex
2450 try: regexp = self.__funcParamsRegex
2449 except AttributeError:
2451 except AttributeError:
2450 regexp = self.__funcParamsRegex = re.compile(r'''
2452 regexp = self.__funcParamsRegex = re.compile(r'''
2451 '.*?(?<!\\)' | # single quoted strings or
2453 '.*?(?<!\\)' | # single quoted strings or
2452 ".*?(?<!\\)" | # double quoted strings or
2454 ".*?(?<!\\)" | # double quoted strings or
2453 \w+ | # identifier
2455 \w+ | # identifier
2454 \S # other characters
2456 \S # other characters
2455 ''', re.VERBOSE | re.DOTALL)
2457 ''', re.VERBOSE | re.DOTALL)
2456 # 1. find the nearest identifier that comes before an unclosed
2458 # 1. find the nearest identifier that comes before an unclosed
2457 # parenthesis before the cursor
2459 # parenthesis before the cursor
2458 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
2460 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
2459 tokens = regexp.findall(self.text_until_cursor)
2461 tokens = regexp.findall(self.text_until_cursor)
2460 iterTokens = reversed(tokens); openPar = 0
2462 iterTokens = reversed(tokens); openPar = 0
2461
2463
2462 for token in iterTokens:
2464 for token in iterTokens:
2463 if token == ')':
2465 if token == ')':
2464 openPar -= 1
2466 openPar -= 1
2465 elif token == '(':
2467 elif token == '(':
2466 openPar += 1
2468 openPar += 1
2467 if openPar > 0:
2469 if openPar > 0:
2468 # found the last unclosed parenthesis
2470 # found the last unclosed parenthesis
2469 break
2471 break
2470 else:
2472 else:
2471 return []
2473 return []
2472 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
2474 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
2473 ids = []
2475 ids = []
2474 isId = re.compile(r'\w+$').match
2476 isId = re.compile(r'\w+$').match
2475
2477
2476 while True:
2478 while True:
2477 try:
2479 try:
2478 ids.append(next(iterTokens))
2480 ids.append(next(iterTokens))
2479 if not isId(ids[-1]):
2481 if not isId(ids[-1]):
2480 ids.pop(); break
2482 ids.pop(); break
2481 if not next(iterTokens) == '.':
2483 if not next(iterTokens) == '.':
2482 break
2484 break
2483 except StopIteration:
2485 except StopIteration:
2484 break
2486 break
2485
2487
2486 # Find all named arguments already assigned to, so as to avoid suggesting
2488 # Find all named arguments already assigned to, so as to avoid suggesting
2487 # them again
2489 # them again
2488 usedNamedArgs = set()
2490 usedNamedArgs = set()
2489 par_level = -1
2491 par_level = -1
2490 for token, next_token in zip(tokens, tokens[1:]):
2492 for token, next_token in zip(tokens, tokens[1:]):
2491 if token == '(':
2493 if token == '(':
2492 par_level += 1
2494 par_level += 1
2493 elif token == ')':
2495 elif token == ')':
2494 par_level -= 1
2496 par_level -= 1
2495
2497
2496 if par_level != 0:
2498 if par_level != 0:
2497 continue
2499 continue
2498
2500
2499 if next_token != '=':
2501 if next_token != '=':
2500 continue
2502 continue
2501
2503
2502 usedNamedArgs.add(token)
2504 usedNamedArgs.add(token)
2503
2505
2504 argMatches = []
2506 argMatches = []
2505 try:
2507 try:
2506 callableObj = '.'.join(ids[::-1])
2508 callableObj = '.'.join(ids[::-1])
2507 namedArgs = self._default_arguments(eval(callableObj,
2509 namedArgs = self._default_arguments(eval(callableObj,
2508 self.namespace))
2510 self.namespace))
2509
2511
2510 # Remove used named arguments from the list, no need to show twice
2512 # Remove used named arguments from the list, no need to show twice
2511 for namedArg in set(namedArgs) - usedNamedArgs:
2513 for namedArg in set(namedArgs) - usedNamedArgs:
2512 if namedArg.startswith(text):
2514 if namedArg.startswith(text):
2513 argMatches.append("%s=" %namedArg)
2515 argMatches.append("%s=" %namedArg)
2514 except:
2516 except:
2515 pass
2517 pass
2516
2518
2517 return argMatches
2519 return argMatches
2518
2520
2519 @staticmethod
2521 @staticmethod
2520 def _get_keys(obj: Any) -> List[Any]:
2522 def _get_keys(obj: Any) -> List[Any]:
2521 # Objects can define their own completions by defining an
2523 # Objects can define their own completions by defining an
2522 # _ipython_key_completions_() method.
2524 # _ipython_key_completions_() method.
2523 method = get_real_method(obj, '_ipython_key_completions_')
2525 method = get_real_method(obj, '_ipython_key_completions_')
2524 if method is not None:
2526 if method is not None:
2525 return method()
2527 return method()
2526
2528
2527 # Special case some common in-memory dict-like types
2529 # Special case some common in-memory dict-like types
2528 if isinstance(obj, dict) or _safe_isinstance(obj, "pandas", "DataFrame"):
2530 if isinstance(obj, dict) or _safe_isinstance(obj, "pandas", "DataFrame"):
2529 try:
2531 try:
2530 return list(obj.keys())
2532 return list(obj.keys())
2531 except Exception:
2533 except Exception:
2532 return []
2534 return []
2533 elif _safe_isinstance(obj, "pandas", "core", "indexing", "_LocIndexer"):
2535 elif _safe_isinstance(obj, "pandas", "core", "indexing", "_LocIndexer"):
2534 try:
2536 try:
2535 return list(obj.obj.keys())
2537 return list(obj.obj.keys())
2536 except Exception:
2538 except Exception:
2537 return []
2539 return []
2538 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
2540 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
2539 _safe_isinstance(obj, 'numpy', 'void'):
2541 _safe_isinstance(obj, 'numpy', 'void'):
2540 return obj.dtype.names or []
2542 return obj.dtype.names or []
2541 return []
2543 return []
2542
2544
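# --- Illustrative example (not part of the original source) ---
# Minimal sketch of the key-completion protocol used by _get_keys above: any
# object can expose ``_ipython_key_completions_`` to control what
# ``obj[<tab>`` offers (the class below is hypothetical):
#
#     class Settings:
#         def __getitem__(self, key):
#             ...
#         def _ipython_key_completions_(self):
#             return ['host', 'port', 'user']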
2543 @context_matcher()
2545 @context_matcher()
2544 def dict_key_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2546 def dict_key_matcher(self, context: CompletionContext) -> SimpleMatcherResult:
2545 """Match string keys in a dictionary, after e.g. ``foo[``."""
2547 """Match string keys in a dictionary, after e.g. ``foo[``."""
2546 matches = self.dict_key_matches(context.token)
2548 matches = self.dict_key_matches(context.token)
2547 return _convert_matcher_v1_result_to_v2(
2549 return _convert_matcher_v1_result_to_v2(
2548 matches, type="dict key", suppress_if_matches=True
2550 matches, type="dict key", suppress_if_matches=True
2549 )
2551 )
2550
2552
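# --- Illustrative example (not part of the original source) ---
# Sketch of dict-key completion as performed by the matcher above, assuming
# the user namespace contains ``d = {'apple': 1, 'apricot': 2}``; because the
# matcher sets ``suppress_if_matches=True``, other matchers are silenced
# whenever a key matches:
#
#     d['ap<tab>   ->   apple   apricot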
2551 def dict_key_matches(self, text: str) -> List[str]:
2553 def dict_key_matches(self, text: str) -> List[str]:
2552 """Match string keys in a dictionary, after e.g. ``foo[``.
2554 """Match string keys in a dictionary, after e.g. ``foo[``.
2553
2555
2554 .. deprecated:: 8.6
2556 .. deprecated:: 8.6
2555 You can use :meth:`dict_key_matcher` instead.
2557 You can use :meth:`dict_key_matcher` instead.
2556 """
2558 """
2557
2559
2558 # Short-circuit on closed dictionary (regular expression would
2560 # Short-circuit on closed dictionary (regular expression would
2559 # not match anyway, but would take quite a while).
2561 # not match anyway, but would take quite a while).
2560 if self.text_until_cursor.strip().endswith("]"):
2562 if self.text_until_cursor.strip().endswith("]"):
2561 return []
2563 return []
2562
2564
2563 match = DICT_MATCHER_REGEX.search(self.text_until_cursor)
2565 match = DICT_MATCHER_REGEX.search(self.text_until_cursor)
2564
2566
2565 if match is None:
2567 if match is None:
2566 return []
2568 return []
2567
2569
2568 expr, prior_tuple_keys, key_prefix = match.groups()
2570 expr, prior_tuple_keys, key_prefix = match.groups()
2569
2571
2570 obj = self._evaluate_expr(expr)
2572 obj = self._evaluate_expr(expr)
2571
2573
2572 if obj is not_found:
2574 if obj is not_found:
2573 return []
2575 return []
2574
2576
2575 keys = self._get_keys(obj)
2577 keys = self._get_keys(obj)
2576 if not keys:
2578 if not keys:
2577 return keys
2579 return keys
2578
2580
2579 tuple_prefix = guarded_eval(
2581 tuple_prefix = guarded_eval(
2580 prior_tuple_keys,
2582 prior_tuple_keys,
2581 EvaluationContext(
2583 EvaluationContext(
2582 globals=self.global_namespace,
2584 globals=self.global_namespace,
2583 locals=self.namespace,
2585 locals=self.namespace,
2584 evaluation=self.evaluation, # type: ignore
2586 evaluation=self.evaluation, # type: ignore
2585 in_subscript=True,
2587 in_subscript=True,
2586 ),
2588 ),
2587 )
2589 )
2588
2590
2589 closing_quote, token_offset, matches = match_dict_keys(
2591 closing_quote, token_offset, matches = match_dict_keys(
2590 keys, key_prefix, self.splitter.delims, extra_prefix=tuple_prefix
2592 keys, key_prefix, self.splitter.delims, extra_prefix=tuple_prefix
2591 )
2593 )
2592 if not matches:
2594 if not matches:
2593 return []
2595 return []
2594
2596
2595 # get the cursor position of
2597 # get the cursor position of
2596 # - the text being completed
2598 # - the text being completed
2597 # - the start of the key text
2599 # - the start of the key text
2598 # - the start of the completion
2600 # - the start of the completion
2599 text_start = len(self.text_until_cursor) - len(text)
2601 text_start = len(self.text_until_cursor) - len(text)
2600 if key_prefix:
2602 if key_prefix:
2601 key_start = match.start(3)
2603 key_start = match.start(3)
2602 completion_start = key_start + token_offset
2604 completion_start = key_start + token_offset
2603 else:
2605 else:
2604 key_start = completion_start = match.end()
2606 key_start = completion_start = match.end()
2605
2607
2606 # grab the leading prefix, to make sure all completions start with `text`
2608 # grab the leading prefix, to make sure all completions start with `text`
2607 if text_start > key_start:
2609 if text_start > key_start:
2608 leading = ''
2610 leading = ''
2609 else:
2611 else:
2610 leading = text[text_start:completion_start]
2612 leading = text[text_start:completion_start]
2611
2613
2612 # append closing quote and bracket as appropriate
2614 # append closing quote and bracket as appropriate
2613 # this is *not* appropriate if the opening quote or bracket is outside
2615 # this is *not* appropriate if the opening quote or bracket is outside
2614 # the text given to this method, e.g. `d["""a\nt
2616 # the text given to this method, e.g. `d["""a\nt
2615 can_close_quote = False
2617 can_close_quote = False
2616 can_close_bracket = False
2618 can_close_bracket = False
2617
2619
2618 continuation = self.line_buffer[len(self.text_until_cursor) :].strip()
2620 continuation = self.line_buffer[len(self.text_until_cursor) :].strip()
2619
2621
2620 if continuation.startswith(closing_quote):
2622 if continuation.startswith(closing_quote):
2621 # do not close if already closed, e.g. `d['a<tab>'`
2623 # do not close if already closed, e.g. `d['a<tab>'`
2622 continuation = continuation[len(closing_quote) :]
2624 continuation = continuation[len(closing_quote) :]
2623 else:
2625 else:
2624 can_close_quote = True
2626 can_close_quote = True
2625
2627
2626 continuation = continuation.strip()
2628 continuation = continuation.strip()
2627
2629
2628 # e.g. `pandas.DataFrame` has different tuple indexer behaviour,
2630 # e.g. `pandas.DataFrame` has different tuple indexer behaviour,
2629 # handling it is out of scope, so let's avoid appending suffixes.
2631 # handling it is out of scope, so let's avoid appending suffixes.
2630 has_known_tuple_handling = isinstance(obj, dict)
2632 has_known_tuple_handling = isinstance(obj, dict)
2631
2633
2632 can_close_bracket = (
2634 can_close_bracket = (
2633 not continuation.startswith("]") and self.auto_close_dict_keys
2635 not continuation.startswith("]") and self.auto_close_dict_keys
2634 )
2636 )
2635 can_close_tuple_item = (
2637 can_close_tuple_item = (
2636 not continuation.startswith(",")
2638 not continuation.startswith(",")
2637 and has_known_tuple_handling
2639 and has_known_tuple_handling
2638 and self.auto_close_dict_keys
2640 and self.auto_close_dict_keys
2639 )
2641 )
2640 can_close_quote = can_close_quote and self.auto_close_dict_keys
2642 can_close_quote = can_close_quote and self.auto_close_dict_keys
2641
2643
2642 # fast path if a closing quote should be appended but no suffix is allowed
2644 # fast path if a closing quote should be appended but no suffix is allowed
2643 if not can_close_quote and not can_close_bracket and closing_quote:
2645 if not can_close_quote and not can_close_bracket and closing_quote:
2644 return [leading + k for k in matches]
2646 return [leading + k for k in matches]
2645
2647
2646 results = []
2648 results = []
2647
2649
2648 end_of_tuple_or_item = _DictKeyState.END_OF_TUPLE | _DictKeyState.END_OF_ITEM
2650 end_of_tuple_or_item = _DictKeyState.END_OF_TUPLE | _DictKeyState.END_OF_ITEM
2649
2651
2650 for k, state_flag in matches.items():
2652 for k, state_flag in matches.items():
2651 result = leading + k
2653 result = leading + k
2652 if can_close_quote and closing_quote:
2654 if can_close_quote and closing_quote:
2653 result += closing_quote
2655 result += closing_quote
2654
2656
2655 if state_flag == end_of_tuple_or_item:
2657 if state_flag == end_of_tuple_or_item:
2656 # We do not know which suffix to add,
2658 # We do not know which suffix to add,
2657 # e.g. both tuple item and string
2659 # e.g. both tuple item and string
2658 # match this item.
2660 # match this item.
2659 pass
2661 pass
2660
2662
2661 if state_flag in end_of_tuple_or_item and can_close_bracket:
2663 if state_flag in end_of_tuple_or_item and can_close_bracket:
2662 result += "]"
2664 result += "]"
2663 if state_flag == _DictKeyState.IN_TUPLE and can_close_tuple_item:
2665 if state_flag == _DictKeyState.IN_TUPLE and can_close_tuple_item:
2664 result += ", "
2666 result += ", "
2665 results.append(result)
2667 results.append(result)
2666 return results
2668 return results
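# A minimal sketch of what dict-key matching produces, assuming an interactive
# shell obtained via get_ipython() and a user-defined dictionary ``d`` (the
# names below are illustrative, not part of this diff):
#
#     ip = get_ipython()
#     ip.user_ns["d"] = {"abc": 1, "abd": 2}
#     text, matches = ip.Completer.complete(line_buffer="d['ab", cursor_pos=5)
#     # matches would contain entries completing the keys 'abc' and 'abd';
#     # whether a closing quote/bracket is appended depends on the
#     # auto_close_dict_keys option handled above.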
2667
2669
2668 @context_matcher()
2670 @context_matcher()
2669 def unicode_name_matcher(self, context: CompletionContext):
2671 def unicode_name_matcher(self, context: CompletionContext):
2670 """Same as :any:`unicode_name_matches`, but adopted to new Matcher API."""
2672 """Same as :any:`unicode_name_matches`, but adopted to new Matcher API."""
2671 fragment, matches = self.unicode_name_matches(context.text_until_cursor)
2673 fragment, matches = self.unicode_name_matches(context.text_until_cursor)
2672 return _convert_matcher_v1_result_to_v2(
2674 return _convert_matcher_v1_result_to_v2(
2673 matches, type="unicode", fragment=fragment, suppress_if_matches=True
2675 matches, type="unicode", fragment=fragment, suppress_if_matches=True
2674 )
2676 )
2675
2677
2676 @staticmethod
2678 @staticmethod
2677 def unicode_name_matches(text: str) -> Tuple[str, List[str]]:
2679 def unicode_name_matches(text: str) -> Tuple[str, List[str]]:
2678 """Match Latex-like syntax for unicode characters base
2680 """Match Latex-like syntax for unicode characters base
2679 on the name of the character.
2681 on the name of the character.
2680
2682
2681 This does ``\\GREEK SMALL LETTER ETA`` -> ``Ξ·``
2683 This does ``\\GREEK SMALL LETTER ETA`` -> ``Ξ·``
2682
2684
2683 Works only on valid Python 3 identifiers, or on combining characters that
2685 Works only on valid Python 3 identifiers, or on combining characters that
2684 will combine to form a valid identifier.
2686 will combine to form a valid identifier.
2685 """
2687 """
2686 slashpos = text.rfind('\\')
2688 slashpos = text.rfind('\\')
2687 if slashpos > -1:
2689 if slashpos > -1:
2688 s = text[slashpos+1:]
2690 s = text[slashpos+1:]
2689 try :
2691 try :
2690 unic = unicodedata.lookup(s)
2692 unic = unicodedata.lookup(s)
2691 # allow combining chars
2693 # allow combining chars
2692 if ('a'+unic).isidentifier():
2694 if ('a'+unic).isidentifier():
2693 return '\\'+s,[unic]
2695 return '\\'+s,[unic]
2694 except KeyError:
2696 except KeyError:
2695 pass
2697 pass
2696 return '', []
2698 return '', []
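# For reference, the lookup behind this matcher is the standard library's
# unicodedata module; a quick sketch of the round trip:
#
#     import unicodedata
#     unicodedata.lookup("GREEK SMALL LETTER ETA")   # -> 'Ξ·'
#     unicodedata.name("Ξ·")                          # -> 'GREEK SMALL LETTER ETA'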
2697
2699
2698 @context_matcher()
2700 @context_matcher()
2699 def latex_name_matcher(self, context: CompletionContext):
2701 def latex_name_matcher(self, context: CompletionContext):
2700 """Match Latex syntax for unicode characters.
2702 """Match Latex syntax for unicode characters.
2701
2703
2702 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``Ξ±``
2704 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``Ξ±``
2703 """
2705 """
2704 fragment, matches = self.latex_matches(context.text_until_cursor)
2706 fragment, matches = self.latex_matches(context.text_until_cursor)
2705 return _convert_matcher_v1_result_to_v2(
2707 return _convert_matcher_v1_result_to_v2(
2706 matches, type="latex", fragment=fragment, suppress_if_matches=True
2708 matches, type="latex", fragment=fragment, suppress_if_matches=True
2707 )
2709 )
2708
2710
2709 def latex_matches(self, text: str) -> Tuple[str, Sequence[str]]:
2711 def latex_matches(self, text: str) -> Tuple[str, Sequence[str]]:
2710 """Match Latex syntax for unicode characters.
2712 """Match Latex syntax for unicode characters.
2711
2713
2712 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``Ξ±``
2714 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``Ξ±``
2713
2715
2714 .. deprecated:: 8.6
2716 .. deprecated:: 8.6
2715 You can use :meth:`latex_name_matcher` instead.
2717 You can use :meth:`latex_name_matcher` instead.
2716 """
2718 """
2717 slashpos = text.rfind('\\')
2719 slashpos = text.rfind('\\')
2718 if slashpos > -1:
2720 if slashpos > -1:
2719 s = text[slashpos:]
2721 s = text[slashpos:]
2720 if s in latex_symbols:
2722 if s in latex_symbols:
2721 # Try to complete a full latex symbol to unicode
2723 # Try to complete a full latex symbol to unicode
2722 # \\alpha -> Ξ±
2724 # \\alpha -> Ξ±
2723 return s, [latex_symbols[s]]
2725 return s, [latex_symbols[s]]
2724 else:
2726 else:
2725 # If a user has partially typed a latex symbol, give them
2727 # If a user has partially typed a latex symbol, give them
2726 # a full list of options \al -> [\aleph, \alpha]
2728 # a full list of options \al -> [\aleph, \alpha]
2727 matches = [k for k in latex_symbols if k.startswith(s)]
2729 matches = [k for k in latex_symbols if k.startswith(s)]
2728 if matches:
2730 if matches:
2729 return s, matches
2731 return s, matches
2730 return '', ()
2732 return '', ()
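# A small sketch of the two behaviours described above, using the
# latex_symbols mapping this matcher relies on:
#
#     from IPython.core.latex_symbols import latex_symbols
#     latex_symbols["\\alpha"]                                  # -> 'Ξ±'
#     [k for k in latex_symbols if k.startswith("\\al")]        # e.g. ['\\aleph', '\\alpha', ...]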
2731
2733
2732 @context_matcher()
2734 @context_matcher()
2733 def custom_completer_matcher(self, context):
2735 def custom_completer_matcher(self, context):
2734 """Dispatch custom completer.
2736 """Dispatch custom completer.
2735
2737
2736 If a match is found, suppresses all other matchers except for Jedi.
2738 If a match is found, suppresses all other matchers except for Jedi.
2737 """
2739 """
2738 matches = self.dispatch_custom_completer(context.token) or []
2740 matches = self.dispatch_custom_completer(context.token) or []
2739 result = _convert_matcher_v1_result_to_v2(
2741 result = _convert_matcher_v1_result_to_v2(
2740 matches, type=_UNKNOWN_TYPE, suppress_if_matches=True
2742 matches, type=_UNKNOWN_TYPE, suppress_if_matches=True
2741 )
2743 )
2742 result["ordered"] = True
2744 result["ordered"] = True
2743 result["do_not_suppress"] = {_get_matcher_id(self._jedi_matcher)}
2745 result["do_not_suppress"] = {_get_matcher_id(self._jedi_matcher)}
2744 return result
2746 return result
2745
2747
2746 def dispatch_custom_completer(self, text):
2748 def dispatch_custom_completer(self, text):
2747 """
2749 """
2748 .. deprecated:: 8.6
2750 .. deprecated:: 8.6
2749 You can use :meth:`custom_completer_matcher` instead.
2751 You can use :meth:`custom_completer_matcher` instead.
2750 """
2752 """
2751 if not self.custom_completers:
2753 if not self.custom_completers:
2752 return
2754 return
2753
2755
2754 line = self.line_buffer
2756 line = self.line_buffer
2755 if not line.strip():
2757 if not line.strip():
2756 return None
2758 return None
2757
2759
2758 # Create a little structure to pass all the relevant information about
2760 # Create a little structure to pass all the relevant information about
2759 # the current completion to any custom completer.
2761 # the current completion to any custom completer.
2760 event = SimpleNamespace()
2762 event = SimpleNamespace()
2761 event.line = line
2763 event.line = line
2762 event.symbol = text
2764 event.symbol = text
2763 cmd = line.split(None,1)[0]
2765 cmd = line.split(None,1)[0]
2764 event.command = cmd
2766 event.command = cmd
2765 event.text_until_cursor = self.text_until_cursor
2767 event.text_until_cursor = self.text_until_cursor
2766
2768
2767 # for foo etc, try also to find completer for %foo
2769 # for foo etc, try also to find completer for %foo
2768 if not cmd.startswith(self.magic_escape):
2770 if not cmd.startswith(self.magic_escape):
2769 try_magic = self.custom_completers.s_matches(
2771 try_magic = self.custom_completers.s_matches(
2770 self.magic_escape + cmd)
2772 self.magic_escape + cmd)
2771 else:
2773 else:
2772 try_magic = []
2774 try_magic = []
2773
2775
2774 for c in itertools.chain(self.custom_completers.s_matches(cmd),
2776 for c in itertools.chain(self.custom_completers.s_matches(cmd),
2775 try_magic,
2777 try_magic,
2776 self.custom_completers.flat_matches(self.text_until_cursor)):
2778 self.custom_completers.flat_matches(self.text_until_cursor)):
2777 try:
2779 try:
2778 res = c(event)
2780 res = c(event)
2779 if res:
2781 if res:
2780 # first, try case sensitive match
2782 # first, try case sensitive match
2781 withcase = [r for r in res if r.startswith(text)]
2783 withcase = [r for r in res if r.startswith(text)]
2782 if withcase:
2784 if withcase:
2783 return withcase
2785 return withcase
2784 # if none, then case insensitive ones are ok too
2786 # if none, then case insensitive ones are ok too
2785 text_low = text.lower()
2787 text_low = text.lower()
2786 return [r for r in res if r.lower().startswith(text_low)]
2788 return [r for r in res if r.lower().startswith(text_low)]
2787 except TryNext:
2789 except TryNext:
2788 pass
2790 pass
2789 except KeyboardInterrupt:
2791 except KeyboardInterrupt:
2790 """
2792 """
2791 If a custom completer takes too long,
2793 If a custom completer takes too long,
2792 let the keyboard interrupt abort it and return nothing.
2794 let the keyboard interrupt abort it and return nothing.
2793 """
2795 """
2794 break
2796 break
2795
2797
2796 return None
2798 return None
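# A sketch of how a custom completer is typically registered, based on the
# documented ``complete_command`` hook (the command name here is illustrative):
#
#     def apt_completer(self, event):
#         # ``event`` carries .line, .symbol, .command and .text_until_cursor,
#         # as built above; return a list of candidate strings.
#         return ["update", "upgrade", "install", "remove"]
#
#     get_ipython().set_hook("complete_command", apt_completer, str_key="apt")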
2797
2799
2798 def completions(self, text: str, offset: int)->Iterator[Completion]:
2800 def completions(self, text: str, offset: int)->Iterator[Completion]:
2799 """
2801 """
2800 Returns an iterator over the possible completions
2802 Returns an iterator over the possible completions
2801
2803
2802 .. warning::
2804 .. warning::
2803
2805
2804 Unstable
2806 Unstable
2805
2807
2806 This function is unstable, API may change without warning.
2808 This function is unstable, API may change without warning.
2807 It will also raise unless used in the proper context manager.
2809 It will also raise unless used in the proper context manager.
2808
2810
2809 Parameters
2811 Parameters
2810 ----------
2812 ----------
2811 text : str
2813 text : str
2812 Full text of the current input, multi line string.
2814 Full text of the current input, multi line string.
2813 offset : int
2815 offset : int
2814 Integer representing the position of the cursor in ``text``. Offset
2816 Integer representing the position of the cursor in ``text``. Offset
2815 is 0-based indexed.
2817 is 0-based indexed.
2816
2818
2817 Yields
2819 Yields
2818 ------
2820 ------
2819 Completion
2821 Completion
2820
2822
2821 Notes
2823 Notes
2822 -----
2824 -----
2823 The cursor in a text can be seen either as being "in between"
2825 The cursor in a text can be seen either as being "in between"
2824 characters or as being "on" a character, depending on the interface visible to
2826 characters or as being "on" a character, depending on the interface visible to
2825 the user. For consistency, the cursor being "in between" characters X
2827 the user. For consistency, the cursor being "in between" characters X
2826 and Y is equivalent to the cursor being "on" character Y, that is to say
2828 and Y is equivalent to the cursor being "on" character Y, that is to say
2827 the character the cursor is on is considered as being after the cursor.
2829 the character the cursor is on is considered as being after the cursor.
2828
2830
2829 Combining characters may span more than one position in the
2831 Combining characters may span more than one position in the
2830 text.
2832 text.
2831
2833
2832 .. note::
2834 .. note::
2833
2835
2834 If ``IPCompleter.debug`` is :any:`True`, this will yield a ``--jedi/ipython--``
2836 If ``IPCompleter.debug`` is :any:`True`, this will yield a ``--jedi/ipython--``
2835 fake Completion token to distinguish completions returned by Jedi
2837 fake Completion token to distinguish completions returned by Jedi
2836 from the usual IPython completions.
2838 from the usual IPython completions.
2837
2839
2838 .. note::
2840 .. note::
2839
2841
2840 Completions are not completely deduplicated yet. If identical
2842 Completions are not completely deduplicated yet. If identical
2841 completions are coming from different sources this function does not
2843 completions are coming from different sources this function does not
2842 ensure that each completion object will only be present once.
2844 ensure that each completion object will only be present once.
2843 """
2845 """
2844 warnings.warn("_complete is a provisional API (as of IPython 6.0). "
2846 warnings.warn("_complete is a provisional API (as of IPython 6.0). "
2845 "It may change without warnings. "
2847 "It may change without warnings. "
2846 "Use in corresponding context manager.",
2848 "Use in corresponding context manager.",
2847 category=ProvisionalCompleterWarning, stacklevel=2)
2849 category=ProvisionalCompleterWarning, stacklevel=2)
2848
2850
2849 seen = set()
2851 seen = set()
2850 profiler:Optional[cProfile.Profile]
2852 profiler:Optional[cProfile.Profile]
2851 try:
2853 try:
2852 if self.profile_completions:
2854 if self.profile_completions:
2853 import cProfile
2855 import cProfile
2854 profiler = cProfile.Profile()
2856 profiler = cProfile.Profile()
2855 profiler.enable()
2857 profiler.enable()
2856 else:
2858 else:
2857 profiler = None
2859 profiler = None
2858
2860
2859 for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
2861 for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
2860 if c and (c in seen):
2862 if c and (c in seen):
2861 continue
2863 continue
2862 yield c
2864 yield c
2863 seen.add(c)
2865 seen.add(c)
2864 except KeyboardInterrupt:
2866 except KeyboardInterrupt:
2865 """if completions take too long and users send keyboard interrupt,
2867 """if completions take too long and users send keyboard interrupt,
2866 do not crash and return ASAP. """
2868 do not crash and return ASAP. """
2867 pass
2869 pass
2868 finally:
2870 finally:
2869 if profiler is not None:
2871 if profiler is not None:
2870 profiler.disable()
2872 profiler.disable()
2871 ensure_dir_exists(self.profiler_output_dir)
2873 ensure_dir_exists(self.profiler_output_dir)
2872 output_path = os.path.join(self.profiler_output_dir, str(uuid.uuid4()))
2874 output_path = os.path.join(self.profiler_output_dir, str(uuid.uuid4()))
2873 print("Writing profiler output to", output_path)
2875 print("Writing profiler output to", output_path)
2874 profiler.dump_stats(output_path)
2876 profiler.dump_stats(output_path)
2875
2877
2876 def _completions(self, full_text: str, offset: int, *, _timeout) -> Iterator[Completion]:
2878 def _completions(self, full_text: str, offset: int, *, _timeout) -> Iterator[Completion]:
2877 """
2879 """
2878 Core completion module. Same signature as :any:`completions`, with the
2880 Core completion module. Same signature as :any:`completions`, with the
2879 extra `timeout` parameter (in seconds).
2881 extra `timeout` parameter (in seconds).
2880
2882
2881 Computing jedi's completion ``.type`` can be quite expensive (it is a
2883 Computing jedi's completion ``.type`` can be quite expensive (it is a
2882 lazy property) and can require some warm-up, more warm-up than just
2884 lazy property) and can require some warm-up, more warm-up than just
2883 computing the ``name`` of a completion. The warm-up can be:
2885 computing the ``name`` of a completion. The warm-up can be:
2884
2886
2885 - Long warm-up the first time a module is encountered after
2887 - Long warm-up the first time a module is encountered after
2886 install/update: actually build parse/inference tree.
2888 install/update: actually build parse/inference tree.
2887
2889
2888 - first time the module is encountered in a session: load tree from
2890 - first time the module is encountered in a session: load tree from
2889 disk.
2891 disk.
2890
2892
2891 We don't want to block completions for tens of seconds so we give the
2893 We don't want to block completions for tens of seconds so we give the
2892 completer a "budget" of ``_timeout`` seconds per invocation to compute
2894 completer a "budget" of ``_timeout`` seconds per invocation to compute
2893 completion types; the completions that have not yet been computed will
2895 completion types; the completions that have not yet been computed will
2894 be marked as "unknown" and will have a chance to be computed next round
2896 be marked as "unknown" and will have a chance to be computed next round
2895 as things get cached.
2897 as things get cached.
2896
2898
2897 Keep in mind that Jedi is not the only thing processing the completions, so
2899 Keep in mind that Jedi is not the only thing processing the completions, so
2898 keep the timeout short-ish: if we take more than 0.3 seconds we still
2900 keep the timeout short-ish: if we take more than 0.3 seconds we still
2899 have lots of processing to do.
2901 have lots of processing to do.
2900
2902
2901 """
2903 """
2902 deadline = time.monotonic() + _timeout
2904 deadline = time.monotonic() + _timeout
2903
2905
2904 before = full_text[:offset]
2906 before = full_text[:offset]
2905 cursor_line, cursor_column = position_to_cursor(full_text, offset)
2907 cursor_line, cursor_column = position_to_cursor(full_text, offset)
2906
2908
2907 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
2909 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
2908
2910
2909 def is_non_jedi_result(
2911 def is_non_jedi_result(
2910 result: MatcherResult, identifier: str
2912 result: MatcherResult, identifier: str
2911 ) -> TypeGuard[SimpleMatcherResult]:
2913 ) -> TypeGuard[SimpleMatcherResult]:
2912 return identifier != jedi_matcher_id
2914 return identifier != jedi_matcher_id
2913
2915
2914 results = self._complete(
2916 results = self._complete(
2915 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column
2917 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column
2916 )
2918 )
2917
2919
2918 non_jedi_results: Dict[str, SimpleMatcherResult] = {
2920 non_jedi_results: Dict[str, SimpleMatcherResult] = {
2919 identifier: result
2921 identifier: result
2920 for identifier, result in results.items()
2922 for identifier, result in results.items()
2921 if is_non_jedi_result(result, identifier)
2923 if is_non_jedi_result(result, identifier)
2922 }
2924 }
2923
2925
2924 jedi_matches = (
2926 jedi_matches = (
2925 cast(_JediMatcherResult, results[jedi_matcher_id])["completions"]
2927 cast(_JediMatcherResult, results[jedi_matcher_id])["completions"]
2926 if jedi_matcher_id in results
2928 if jedi_matcher_id in results
2927 else ()
2929 else ()
2928 )
2930 )
2929
2931
2930 iter_jm = iter(jedi_matches)
2932 iter_jm = iter(jedi_matches)
2931 if _timeout:
2933 if _timeout:
2932 for jm in iter_jm:
2934 for jm in iter_jm:
2933 try:
2935 try:
2934 type_ = jm.type
2936 type_ = jm.type
2935 except Exception:
2937 except Exception:
2936 if self.debug:
2938 if self.debug:
2937 print("Error in Jedi getting type of ", jm)
2939 print("Error in Jedi getting type of ", jm)
2938 type_ = None
2940 type_ = None
2939 delta = len(jm.name_with_symbols) - len(jm.complete)
2941 delta = len(jm.name_with_symbols) - len(jm.complete)
2940 if type_ == 'function':
2942 if type_ == 'function':
2941 signature = _make_signature(jm)
2943 signature = _make_signature(jm)
2942 else:
2944 else:
2943 signature = ''
2945 signature = ''
2944 yield Completion(start=offset - delta,
2946 yield Completion(start=offset - delta,
2945 end=offset,
2947 end=offset,
2946 text=jm.name_with_symbols,
2948 text=jm.name_with_symbols,
2947 type=type_,
2949 type=type_,
2948 signature=signature,
2950 signature=signature,
2949 _origin='jedi')
2951 _origin='jedi')
2950
2952
2951 if time.monotonic() > deadline:
2953 if time.monotonic() > deadline:
2952 break
2954 break
2953
2955
2954 for jm in iter_jm:
2956 for jm in iter_jm:
2955 delta = len(jm.name_with_symbols) - len(jm.complete)
2957 delta = len(jm.name_with_symbols) - len(jm.complete)
2956 yield Completion(
2958 yield Completion(
2957 start=offset - delta,
2959 start=offset - delta,
2958 end=offset,
2960 end=offset,
2959 text=jm.name_with_symbols,
2961 text=jm.name_with_symbols,
2960 type=_UNKNOWN_TYPE, # don't compute type for speed
2962 type=_UNKNOWN_TYPE, # don't compute type for speed
2961 _origin="jedi",
2963 _origin="jedi",
2962 signature="",
2964 signature="",
2963 )
2965 )
2964
2966
2965 # TODO:
2967 # TODO:
2966 # Suppress this, right now just for debug.
2968 # Suppress this, right now just for debug.
2967 if jedi_matches and non_jedi_results and self.debug:
2969 if jedi_matches and non_jedi_results and self.debug:
2968 some_start_offset = before.rfind(
2970 some_start_offset = before.rfind(
2969 next(iter(non_jedi_results.values()))["matched_fragment"]
2971 next(iter(non_jedi_results.values()))["matched_fragment"]
2970 )
2972 )
2971 yield Completion(
2973 yield Completion(
2972 start=some_start_offset,
2974 start=some_start_offset,
2973 end=offset,
2975 end=offset,
2974 text="--jedi/ipython--",
2976 text="--jedi/ipython--",
2975 _origin="debug",
2977 _origin="debug",
2976 type="none",
2978 type="none",
2977 signature="",
2979 signature="",
2978 )
2980 )
2979
2981
2980 ordered: List[Completion] = []
2982 ordered: List[Completion] = []
2981 sortable: List[Completion] = []
2983 sortable: List[Completion] = []
2982
2984
2983 for origin, result in non_jedi_results.items():
2985 for origin, result in non_jedi_results.items():
2984 matched_text = result["matched_fragment"]
2986 matched_text = result["matched_fragment"]
2985 start_offset = before.rfind(matched_text)
2987 start_offset = before.rfind(matched_text)
2986 is_ordered = result.get("ordered", False)
2988 is_ordered = result.get("ordered", False)
2987 container = ordered if is_ordered else sortable
2989 container = ordered if is_ordered else sortable
2988
2990
2989 # I'm unsure if this is always true, so let's assert and see if it
2991 # I'm unsure if this is always true, so let's assert and see if it
2990 # crashes
2992 # crashes
2991 assert before.endswith(matched_text)
2993 assert before.endswith(matched_text)
2992
2994
2993 for simple_completion in result["completions"]:
2995 for simple_completion in result["completions"]:
2994 completion = Completion(
2996 completion = Completion(
2995 start=start_offset,
2997 start=start_offset,
2996 end=offset,
2998 end=offset,
2997 text=simple_completion.text,
2999 text=simple_completion.text,
2998 _origin=origin,
3000 _origin=origin,
2999 signature="",
3001 signature="",
3000 type=simple_completion.type or _UNKNOWN_TYPE,
3002 type=simple_completion.type or _UNKNOWN_TYPE,
3001 )
3003 )
3002 container.append(completion)
3004 container.append(completion)
3003
3005
3004 yield from list(self._deduplicate(ordered + self._sort(sortable)))[
3006 yield from list(self._deduplicate(ordered + self._sort(sortable)))[
3005 :MATCHES_LIMIT
3007 :MATCHES_LIMIT
3006 ]
3008 ]
3007
3009
3008 def complete(self, text=None, line_buffer=None, cursor_pos=None) -> Tuple[str, Sequence[str]]:
3010 def complete(self, text=None, line_buffer=None, cursor_pos=None) -> Tuple[str, Sequence[str]]:
3009 """Find completions for the given text and line context.
3011 """Find completions for the given text and line context.
3010
3012
3011 Note that both the text and the line_buffer are optional, but at least
3013 Note that both the text and the line_buffer are optional, but at least
3012 one of them must be given.
3014 one of them must be given.
3013
3015
3014 Parameters
3016 Parameters
3015 ----------
3017 ----------
3016 text : string, optional
3018 text : string, optional
3017 Text to perform the completion on. If not given, the line buffer
3019 Text to perform the completion on. If not given, the line buffer
3018 is split using the instance's CompletionSplitter object.
3020 is split using the instance's CompletionSplitter object.
3019 line_buffer : string, optional
3021 line_buffer : string, optional
3020 If not given, the completer attempts to obtain the current line
3022 If not given, the completer attempts to obtain the current line
3021 buffer via readline. This keyword allows clients which are
3023 buffer via readline. This keyword allows clients which are
3022 requesting text completions in non-readline contexts to inform
3024 requesting text completions in non-readline contexts to inform
3023 the completer of the entire text.
3025 the completer of the entire text.
3024 cursor_pos : int, optional
3026 cursor_pos : int, optional
3025 Index of the cursor in the full line buffer. Should be provided by
3027 Index of the cursor in the full line buffer. Should be provided by
3026 remote frontends where kernel has no access to frontend state.
3028 remote frontends where kernel has no access to frontend state.
3027
3029
3028 Returns
3030 Returns
3029 -------
3031 -------
3030 Tuple of two items:
3032 Tuple of two items:
3031 text : str
3033 text : str
3032 Text that was actually used in the completion.
3034 Text that was actually used in the completion.
3033 matches : list
3035 matches : list
3034 A list of completion matches.
3036 A list of completion matches.
3035
3037
3036 Notes
3038 Notes
3037 -----
3039 -----
3038 This API is likely to be deprecated and replaced by
3040 This API is likely to be deprecated and replaced by
3039 :any:`IPCompleter.completions` in the future.
3041 :any:`IPCompleter.completions` in the future.
3040
3042
3041 """
3043 """
3042 warnings.warn('`Completer.complete` is pending deprecation since '
3044 warnings.warn('`Completer.complete` is pending deprecation since '
3043 'IPython 6.0 and will be replaced by `Completer.completions`.',
3045 'IPython 6.0 and will be replaced by `Completer.completions`.',
3044 PendingDeprecationWarning)
3046 PendingDeprecationWarning)
3045 # potential todo: fold the 3rd throw-away argument of _complete
3047 # potential todo: fold the 3rd throw-away argument of _complete
3046 # into the first two.
3048 # into the first two.
3047 # TODO: Q: does the above refer to jedi completions (i.e. 0-indexed?)
3049 # TODO: Q: does the above refer to jedi completions (i.e. 0-indexed?)
3048 # TODO: should we deprecate now, or does it stay?
3050 # TODO: should we deprecate now, or does it stay?
3049
3051
3050 results = self._complete(
3052 results = self._complete(
3051 line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0
3053 line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0
3052 )
3054 )
3053
3055
3054 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
3056 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
3055
3057
3056 return self._arrange_and_extract(
3058 return self._arrange_and_extract(
3057 results,
3059 results,
3058 # TODO: can we confirm that excluding Jedi here was a deliberate choice in previous version?
3060 # TODO: can we confirm that excluding Jedi here was a deliberate choice in previous version?
3059 skip_matchers={jedi_matcher_id},
3061 skip_matchers={jedi_matcher_id},
3060 # this API does not support different start/end positions (fragments of token).
3062 # this API does not support different start/end positions (fragments of token).
3061 abort_if_offset_changes=True,
3063 abort_if_offset_changes=True,
3062 )
3064 )
3063
3065
3064 def _arrange_and_extract(
3066 def _arrange_and_extract(
3065 self,
3067 self,
3066 results: Dict[str, MatcherResult],
3068 results: Dict[str, MatcherResult],
3067 skip_matchers: Set[str],
3069 skip_matchers: Set[str],
3068 abort_if_offset_changes: bool,
3070 abort_if_offset_changes: bool,
3069 ):
3071 ):
3070 sortable: List[AnyMatcherCompletion] = []
3072 sortable: List[AnyMatcherCompletion] = []
3071 ordered: List[AnyMatcherCompletion] = []
3073 ordered: List[AnyMatcherCompletion] = []
3072 most_recent_fragment = None
3074 most_recent_fragment = None
3073 for identifier, result in results.items():
3075 for identifier, result in results.items():
3074 if identifier in skip_matchers:
3076 if identifier in skip_matchers:
3075 continue
3077 continue
3076 if not result["completions"]:
3078 if not result["completions"]:
3077 continue
3079 continue
3078 if not most_recent_fragment:
3080 if not most_recent_fragment:
3079 most_recent_fragment = result["matched_fragment"]
3081 most_recent_fragment = result["matched_fragment"]
3080 if (
3082 if (
3081 abort_if_offset_changes
3083 abort_if_offset_changes
3082 and result["matched_fragment"] != most_recent_fragment
3084 and result["matched_fragment"] != most_recent_fragment
3083 ):
3085 ):
3084 break
3086 break
3085 if result.get("ordered", False):
3087 if result.get("ordered", False):
3086 ordered.extend(result["completions"])
3088 ordered.extend(result["completions"])
3087 else:
3089 else:
3088 sortable.extend(result["completions"])
3090 sortable.extend(result["completions"])
3089
3091
3090 if not most_recent_fragment:
3092 if not most_recent_fragment:
3091 most_recent_fragment = "" # to satisfy typechecker (and just in case)
3093 most_recent_fragment = "" # to satisfy typechecker (and just in case)
3092
3094
3093 return most_recent_fragment, [
3095 return most_recent_fragment, [
3094 m.text for m in self._deduplicate(ordered + self._sort(sortable))
3096 m.text for m in self._deduplicate(ordered + self._sort(sortable))
3095 ]
3097 ]
3096
3098
3097 def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
3099 def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
3098 full_text=None) -> _CompleteResult:
3100 full_text=None) -> _CompleteResult:
3099 """
3101 """
3100 Like complete but can also return raw Jedi completions as well as the
3102 Like complete but can also return raw Jedi completions as well as the
3101 origin of the completion text. This could (and should) be made much
3103 origin of the completion text. This could (and should) be made much
3102 cleaner but that will be simpler once we drop the old (and stateful)
3104 cleaner but that will be simpler once we drop the old (and stateful)
3103 :any:`complete` API.
3105 :any:`complete` API.
3104
3106
3105 With the current provisional API, cursor_pos acts (depending on the
3107 With the current provisional API, cursor_pos acts (depending on the
3106 caller) either as the offset in ``text`` or ``line_buffer``, or as the
3108 caller) either as the offset in ``text`` or ``line_buffer``, or as the
3107 ``column`` when passing multiline strings; this could/should be renamed
3109 ``column`` when passing multiline strings; this could/should be renamed
3108 but would add extra noise.
3110 but would add extra noise.
3109
3111
3110 Parameters
3112 Parameters
3111 ----------
3113 ----------
3112 cursor_line
3114 cursor_line
3113 Index of the line the cursor is on. 0 indexed.
3115 Index of the line the cursor is on. 0 indexed.
3114 cursor_pos
3116 cursor_pos
3115 Position of the cursor in the current line/line_buffer/text. 0
3117 Position of the cursor in the current line/line_buffer/text. 0
3116 indexed.
3118 indexed.
3117 line_buffer : optional, str
3119 line_buffer : optional, str
3118 The current line the cursor is in; this is mostly due to the legacy
3120 The current line the cursor is in; this is mostly due to the legacy
3119 reason that readline could only give us the single current line.
3121 reason that readline could only give us the single current line.
3120 Prefer `full_text`.
3122 Prefer `full_text`.
3121 text : str
3123 text : str
3122 The current "token" the cursor is in, mostly also for historical
3124 The current "token" the cursor is in, mostly also for historical
3123 reasons, as the completer would trigger only after the current line
3125 reasons, as the completer would trigger only after the current line
3124 was parsed.
3126 was parsed.
3125 full_text : str
3127 full_text : str
3126 Full text of the current cell.
3128 Full text of the current cell.
3127
3129
3128 Returns
3130 Returns
3129 -------
3131 -------
3130 An ordered dictionary where keys are identifiers of completion
3132 An ordered dictionary where keys are identifiers of completion
3131 matchers and values are ``MatcherResult``s.
3133 matchers and values are ``MatcherResult``s.
3132 """
3134 """
3133
3135
3134 # if the cursor position isn't given, the only sane assumption we can
3136 # if the cursor position isn't given, the only sane assumption we can
3135 # make is that it's at the end of the line (the common case)
3137 # make is that it's at the end of the line (the common case)
3136 if cursor_pos is None:
3138 if cursor_pos is None:
3137 cursor_pos = len(line_buffer) if text is None else len(text)
3139 cursor_pos = len(line_buffer) if text is None else len(text)
3138
3140
3139 if self.use_main_ns:
3141 if self.use_main_ns:
3140 self.namespace = __main__.__dict__
3142 self.namespace = __main__.__dict__
3141
3143
3142 # if text is either None or an empty string, rely on the line buffer
3144 # if text is either None or an empty string, rely on the line buffer
3143 if (not line_buffer) and full_text:
3145 if (not line_buffer) and full_text:
3144 line_buffer = full_text.split('\n')[cursor_line]
3146 line_buffer = full_text.split('\n')[cursor_line]
3145 if not text: # issue #11508: check line_buffer before calling split_line
3147 if not text: # issue #11508: check line_buffer before calling split_line
3146 text = (
3148 text = (
3147 self.splitter.split_line(line_buffer, cursor_pos) if line_buffer else ""
3149 self.splitter.split_line(line_buffer, cursor_pos) if line_buffer else ""
3148 )
3150 )
3149
3151
3150 # If no line buffer is given, assume the input text is all there was
3152 # If no line buffer is given, assume the input text is all there was
3151 if line_buffer is None:
3153 if line_buffer is None:
3152 line_buffer = text
3154 line_buffer = text
3153
3155
3154 # deprecated - do not use `line_buffer` in new code.
3156 # deprecated - do not use `line_buffer` in new code.
3155 self.line_buffer = line_buffer
3157 self.line_buffer = line_buffer
3156 self.text_until_cursor = self.line_buffer[:cursor_pos]
3158 self.text_until_cursor = self.line_buffer[:cursor_pos]
3157
3159
3158 if not full_text:
3160 if not full_text:
3159 full_text = line_buffer
3161 full_text = line_buffer
3160
3162
3161 context = CompletionContext(
3163 context = CompletionContext(
3162 full_text=full_text,
3164 full_text=full_text,
3163 cursor_position=cursor_pos,
3165 cursor_position=cursor_pos,
3164 cursor_line=cursor_line,
3166 cursor_line=cursor_line,
3165 token=text,
3167 token=text,
3166 limit=MATCHES_LIMIT,
3168 limit=MATCHES_LIMIT,
3167 )
3169 )
3168
3170
3169 # Start with a clean slate of completions
3171 # Start with a clean slate of completions
3170 results: Dict[str, MatcherResult] = {}
3172 results: Dict[str, MatcherResult] = {}
3171
3173
3172 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
3174 jedi_matcher_id = _get_matcher_id(self._jedi_matcher)
3173
3175
3174 suppressed_matchers: Set[str] = set()
3176 suppressed_matchers: Set[str] = set()
3175
3177
3176 matchers = {
3178 matchers = {
3177 _get_matcher_id(matcher): matcher
3179 _get_matcher_id(matcher): matcher
3178 for matcher in sorted(
3180 for matcher in sorted(
3179 self.matchers, key=_get_matcher_priority, reverse=True
3181 self.matchers, key=_get_matcher_priority, reverse=True
3180 )
3182 )
3181 }
3183 }
3182
3184
3183 for matcher_id, matcher in matchers.items():
3185 for matcher_id, matcher in matchers.items():
3184 matcher_id = _get_matcher_id(matcher)
3186 matcher_id = _get_matcher_id(matcher)
3185
3187
3186 if matcher_id in self.disable_matchers:
3188 if matcher_id in self.disable_matchers:
3187 continue
3189 continue
3188
3190
3189 if matcher_id in results:
3191 if matcher_id in results:
3190 warnings.warn(f"Duplicate matcher ID: {matcher_id}.")
3192 warnings.warn(f"Duplicate matcher ID: {matcher_id}.")
3191
3193
3192 if matcher_id in suppressed_matchers:
3194 if matcher_id in suppressed_matchers:
3193 continue
3195 continue
3194
3196
3195 result: MatcherResult
3197 result: MatcherResult
3196 try:
3198 try:
3197 if _is_matcher_v1(matcher):
3199 if _is_matcher_v1(matcher):
3198 result = _convert_matcher_v1_result_to_v2(
3200 result = _convert_matcher_v1_result_to_v2(
3199 matcher(text), type=_UNKNOWN_TYPE
3201 matcher(text), type=_UNKNOWN_TYPE
3200 )
3202 )
3201 elif _is_matcher_v2(matcher):
3203 elif _is_matcher_v2(matcher):
3202 result = matcher(context)
3204 result = matcher(context)
3203 else:
3205 else:
3204 api_version = _get_matcher_api_version(matcher)
3206 api_version = _get_matcher_api_version(matcher)
3205 raise ValueError(f"Unsupported API version {api_version}")
3207 raise ValueError(f"Unsupported API version {api_version}")
3206 except:
3208 except:
3207 # Show the ugly traceback if the matcher causes an
3209 # Show the ugly traceback if the matcher causes an
3208 # exception, but do NOT crash the kernel!
3210 # exception, but do NOT crash the kernel!
3209 sys.excepthook(*sys.exc_info())
3211 sys.excepthook(*sys.exc_info())
3210 continue
3212 continue
3211
3213
3212 # set default value for matched fragment if suffix was not selected.
3214 # set default value for matched fragment if suffix was not selected.
3213 result["matched_fragment"] = result.get("matched_fragment", context.token)
3215 result["matched_fragment"] = result.get("matched_fragment", context.token)
3214
3216
3215 if not suppressed_matchers:
3217 if not suppressed_matchers:
3216 suppression_recommended: Union[bool, Set[str]] = result.get(
3218 suppression_recommended: Union[bool, Set[str]] = result.get(
3217 "suppress", False
3219 "suppress", False
3218 )
3220 )
3219
3221
3220 suppression_config = (
3222 suppression_config = (
3221 self.suppress_competing_matchers.get(matcher_id, None)
3223 self.suppress_competing_matchers.get(matcher_id, None)
3222 if isinstance(self.suppress_competing_matchers, dict)
3224 if isinstance(self.suppress_competing_matchers, dict)
3223 else self.suppress_competing_matchers
3225 else self.suppress_competing_matchers
3224 )
3226 )
3225 should_suppress = (
3227 should_suppress = (
3226 (suppression_config is True)
3228 (suppression_config is True)
3227 or (suppression_recommended and (suppression_config is not False))
3229 or (suppression_recommended and (suppression_config is not False))
3228 ) and has_any_completions(result)
3230 ) and has_any_completions(result)
3229
3231
3230 if should_suppress:
3232 if should_suppress:
3231 suppression_exceptions: Set[str] = result.get(
3233 suppression_exceptions: Set[str] = result.get(
3232 "do_not_suppress", set()
3234 "do_not_suppress", set()
3233 )
3235 )
3234 if isinstance(suppression_recommended, Iterable):
3236 if isinstance(suppression_recommended, Iterable):
3235 to_suppress = set(suppression_recommended)
3237 to_suppress = set(suppression_recommended)
3236 else:
3238 else:
3237 to_suppress = set(matchers)
3239 to_suppress = set(matchers)
3238 suppressed_matchers = to_suppress - suppression_exceptions
3240 suppressed_matchers = to_suppress - suppression_exceptions
3239
3241
3240 new_results = {}
3242 new_results = {}
3241 for previous_matcher_id, previous_result in results.items():
3243 for previous_matcher_id, previous_result in results.items():
3242 if previous_matcher_id not in suppressed_matchers:
3244 if previous_matcher_id not in suppressed_matchers:
3243 new_results[previous_matcher_id] = previous_result
3245 new_results[previous_matcher_id] = previous_result
3244 results = new_results
3246 results = new_results
3245
3247
3246 results[matcher_id] = result
3248 results[matcher_id] = result
3247
3249
3248 _, matches = self._arrange_and_extract(
3250 _, matches = self._arrange_and_extract(
3249 results,
3251 results,
3250 # TODO: Jedi completions are not included in the legacy stateful API; was this deliberate or an omission?
3252 # TODO: Jedi completions are not included in the legacy stateful API; was this deliberate or an omission?
3251 # If it was an omission, we can remove the filtering step; otherwise remove this comment.
3253 # If it was an omission, we can remove the filtering step; otherwise remove this comment.
3252 skip_matchers={jedi_matcher_id},
3254 skip_matchers={jedi_matcher_id},
3253 abort_if_offset_changes=False,
3255 abort_if_offset_changes=False,
3254 )
3256 )
3255
3257
3256 # populate legacy stateful API
3258 # populate legacy stateful API
3257 self.matches = matches
3259 self.matches = matches
3258
3260
3259 return results
3261 return results
3260
3262
3261 @staticmethod
3263 @staticmethod
3262 def _deduplicate(
3264 def _deduplicate(
3263 matches: Sequence[AnyCompletion],
3265 matches: Sequence[AnyCompletion],
3264 ) -> Iterable[AnyCompletion]:
3266 ) -> Iterable[AnyCompletion]:
3265 filtered_matches: Dict[str, AnyCompletion] = {}
3267 filtered_matches: Dict[str, AnyCompletion] = {}
3266 for match in matches:
3268 for match in matches:
3267 text = match.text
3269 text = match.text
3268 if (
3270 if (
3269 text not in filtered_matches
3271 text not in filtered_matches
3270 or filtered_matches[text].type == _UNKNOWN_TYPE
3272 or filtered_matches[text].type == _UNKNOWN_TYPE
3271 ):
3273 ):
3272 filtered_matches[text] = match
3274 filtered_matches[text] = match
3273
3275
3274 return filtered_matches.values()
3276 return filtered_matches.values()
3275
3277
3276 @staticmethod
3278 @staticmethod
3277 def _sort(matches: Sequence[AnyCompletion]):
3279 def _sort(matches: Sequence[AnyCompletion]):
3278 return sorted(matches, key=lambda x: completions_sorting_key(x.text))
3280 return sorted(matches, key=lambda x: completions_sorting_key(x.text))
3279
3281
3280 @context_matcher()
3282 @context_matcher()
3281 def fwd_unicode_matcher(self, context: CompletionContext):
3283 def fwd_unicode_matcher(self, context: CompletionContext):
3282 """Same as :any:`fwd_unicode_match`, but adopted to new Matcher API."""
3284 """Same as :any:`fwd_unicode_match`, but adopted to new Matcher API."""
3283 # TODO: use `context.limit` to terminate early once we matched the maximum
3285 # TODO: use `context.limit` to terminate early once we matched the maximum
3284 # number that will be used downstream; can be added as an optional to
3286 # number that will be used downstream; can be added as an optional to
3285 # `fwd_unicode_match(text: str, limit: int = None)` or we could re-implement here.
3287 # `fwd_unicode_match(text: str, limit: int = None)` or we could re-implement here.
3286 fragment, matches = self.fwd_unicode_match(context.text_until_cursor)
3288 fragment, matches = self.fwd_unicode_match(context.text_until_cursor)
3287 return _convert_matcher_v1_result_to_v2(
3289 return _convert_matcher_v1_result_to_v2(
3288 matches, type="unicode", fragment=fragment, suppress_if_matches=True
3290 matches, type="unicode", fragment=fragment, suppress_if_matches=True
3289 )
3291 )
3290
3292
3291 def fwd_unicode_match(self, text: str) -> Tuple[str, Sequence[str]]:
3293 def fwd_unicode_match(self, text: str) -> Tuple[str, Sequence[str]]:
3292 """
3294 """
3293 Forward match a string starting with a backslash with a list of
3295 Forward match a string starting with a backslash with a list of
3294 potential Unicode completions.
3296 potential Unicode completions.
3295
3297
3296 Will compute the list of Unicode character names on first call and cache it.
3298 Will compute the list of Unicode character names on first call and cache it.
3297
3299
3298 .. deprecated:: 8.6
3300 .. deprecated:: 8.6
3299 You can use :meth:`fwd_unicode_matcher` instead.
3301 You can use :meth:`fwd_unicode_matcher` instead.
3300
3302
3301 Returns
3303 Returns
3302 -------
3304 -------
3303 A tuple with:
3305 A tuple with:
3304 - matched text (empty if no matches)
3306 - matched text (empty if no matches)
3305 - list of potential completions (empty tuple if no matches)
3307 - list of potential completions (empty tuple if no matches)
3306 """
3308 """
3307 # TODO: self.unicode_names is here a list we traverse each time with ~100k elements.
3309 # TODO: self.unicode_names is here a list we traverse each time with ~100k elements.
3308 # We could do a faster match using a Trie.
3310 # We could do a faster match using a Trie.
3309
3311
3312 # Using pygtrie, the following seems to work:
3314 # Using pygtrie, the following seems to work:
3311
3313
3312 # s = PrefixSet()
3314 # s = PrefixSet()
3313
3315
3314 # for c in range(0,0x10FFFF + 1):
3316 # for c in range(0,0x10FFFF + 1):
3315 # try:
3317 # try:
3316 # s.add(unicodedata.name(chr(c)))
3318 # s.add(unicodedata.name(chr(c)))
3317 # except ValueError:
3319 # except ValueError:
3318 # pass
3320 # pass
3319 # [''.join(k) for k in s.iter(prefix)]
3321 # [''.join(k) for k in s.iter(prefix)]
3320
3322
3321 # But this needs to be timed, and it adds an extra dependency.
3323 # But this needs to be timed, and it adds an extra dependency.
3322
3324
3323 slashpos = text.rfind('\\')
3325 slashpos = text.rfind('\\')
3324 # if text starts with slash
3326 # if text starts with slash
3325 if slashpos > -1:
3327 if slashpos > -1:
3326 # PERF: It's important that we don't access self._unicode_names
3328 # PERF: It's important that we don't access self._unicode_names
3327 # until we're inside this if-block. _unicode_names is lazily
3329 # until we're inside this if-block. _unicode_names is lazily
3328 # initialized, and it takes a user-noticeable amount of time to
3330 # initialized, and it takes a user-noticeable amount of time to
3329 # initialize it, so we don't want to initialize it unless we're
3331 # initialize it, so we don't want to initialize it unless we're
3330 # actually going to use it.
3332 # actually going to use it.
3331 s = text[slashpos + 1 :]
3333 s = text[slashpos + 1 :]
3332 sup = s.upper()
3334 sup = s.upper()
3333 candidates = [x for x in self.unicode_names if x.startswith(sup)]
3335 candidates = [x for x in self.unicode_names if x.startswith(sup)]
3334 if candidates:
3336 if candidates:
3335 return s, candidates
3337 return s, candidates
3336 candidates = [x for x in self.unicode_names if sup in x]
3338 candidates = [x for x in self.unicode_names if sup in x]
3337 if candidates:
3339 if candidates:
3338 return s, candidates
3340 return s, candidates
3339 splitsup = sup.split(" ")
3341 splitsup = sup.split(" ")
3340 candidates = [
3342 candidates = [
3341 x for x in self.unicode_names if all(u in x for u in splitsup)
3343 x for x in self.unicode_names if all(u in x for u in splitsup)
3342 ]
3344 ]
3343 if candidates:
3345 if candidates:
3344 return s, candidates
3346 return s, candidates
3345
3347
3346 return "", ()
3348 return "", ()
3347
3349
3348 # if text does not start with slash
3350 # if text does not start with slash
3349 else:
3351 else:
3350 return '', ()
3352 return '', ()
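# A small sketch of the forward unicode match, assuming an IPCompleter
# instance ``completer`` (the name is illustrative):
#
#     fragment, names = completer.fwd_unicode_match("\\GREEK SMALL LETTER AL")
#     # fragment == "GREEK SMALL LETTER AL"
#     # names would include "GREEK SMALL LETTER ALPHA"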
3351
3353
3352 @property
3354 @property
3353 def unicode_names(self) -> List[str]:
3355 def unicode_names(self) -> List[str]:
3354 """List of names of unicode code points that can be completed.
3356 """List of names of unicode code points that can be completed.
3355
3357
3356 The list is lazily initialized on first access.
3358 The list is lazily initialized on first access.
3357 """
3359 """
3358 if self._unicode_names is None:
3360 if self._unicode_names is None:
3359 names = []
3361 names = []
3360 for c in range(0,0x10FFFF + 1):
3362 for c in range(0,0x10FFFF + 1):
3361 try:
3363 try:
3362 names.append(unicodedata.name(chr(c)))
3364 names.append(unicodedata.name(chr(c)))
3363 except ValueError:
3365 except ValueError:
3364 pass
3366 pass
3365 self._unicode_names = _unicode_name_compute(_UNICODE_RANGES)
3367 self._unicode_names = _unicode_name_compute(_UNICODE_RANGES)
3366
3368
3367 return self._unicode_names
3369 return self._unicode_names
3368
3370
3369 def _unicode_name_compute(ranges:List[Tuple[int,int]]) -> List[str]:
3371 def _unicode_name_compute(ranges:List[Tuple[int,int]]) -> List[str]:
3370 names = []
3372 names = []
3371 for start,stop in ranges:
3373 for start,stop in ranges:
3372 for c in range(start, stop) :
3374 for c in range(start, stop) :
3373 try:
3375 try:
3374 names.append(unicodedata.name(chr(c)))
3376 names.append(unicodedata.name(chr(c)))
3375 except ValueError:
3377 except ValueError:
3376 pass
3378 pass
3377 return names
3379 return names
@@ -1,395 +1,394
1 [build-system]
1 [build-system]
2 requires = ["setuptools>=61.2"]
2 requires = ["setuptools>=61.2"]
3 # We need access to the 'setupbase' module at build time.
3 # We need access to the 'setupbase' module at build time.
4 # Hence we declare a custom build backend.
4 # Hence we declare a custom build backend.
5 build-backend = "_build_meta" # just re-exports setuptools.build_meta definitions
5 build-backend = "_build_meta" # just re-exports setuptools.build_meta definitions
6 backend-path = ["."]
6 backend-path = ["."]
7
7
8 [project]
8 [project]
9 name = "ipython"
9 name = "ipython"
10 description = "IPython: Productive Interactive Computing"
10 description = "IPython: Productive Interactive Computing"
11 keywords = ["Interactive", "Interpreter", "Shell", "Embedding"]
11 keywords = ["Interactive", "Interpreter", "Shell", "Embedding"]
12 classifiers = [
12 classifiers = [
13 "Framework :: IPython",
13 "Framework :: IPython",
14 "Framework :: Jupyter",
14 "Framework :: Jupyter",
15 "Intended Audience :: Developers",
15 "Intended Audience :: Developers",
16 "Intended Audience :: Science/Research",
16 "Intended Audience :: Science/Research",
17 "License :: OSI Approved :: BSD License",
17 "License :: OSI Approved :: BSD License",
18 "Programming Language :: Python",
18 "Programming Language :: Python",
19 "Programming Language :: Python :: 3",
19 "Programming Language :: Python :: 3",
20 "Programming Language :: Python :: 3 :: Only",
20 "Programming Language :: Python :: 3 :: Only",
21 "Topic :: System :: Shells",
21 "Topic :: System :: Shells",
22 ]
22 ]
23 requires-python = ">=3.11"
23 requires-python = ">=3.11"
24 dependencies = [
24 dependencies = [
25 'colorama; sys_platform == "win32"',
25 'colorama; sys_platform == "win32"',
26 "decorator",
26 "decorator",
27 "jedi>=0.16",
27 "jedi>=0.16",
28 "matplotlib-inline",
28 "matplotlib-inline",
29 'pexpect>4.3; sys_platform != "win32" and sys_platform != "emscripten"',
29 'pexpect>4.3; sys_platform != "win32" and sys_platform != "emscripten"',
30 "prompt_toolkit>=3.0.41,<3.1.0",
30 "prompt_toolkit>=3.0.41,<3.1.0",
31 "pygments>=2.4.0",
31 "pygments>=2.4.0",
32 "stack_data",
32 "stack_data",
33 "traitlets>=5.13.0",
33 "traitlets>=5.13.0",
34 "typing_extensions>=4.6; python_version<'3.12'",
34 "typing_extensions>=4.6; python_version<'3.12'",
35 ]
35 ]
36 dynamic = ["authors", "license", "version"]
36 dynamic = ["authors", "license", "version"]
37
37
38 [project.entry-points."pygments.lexers"]
38 [project.entry-points."pygments.lexers"]
39 ipythonconsole = "IPython.lib.lexers:IPythonConsoleLexer"
39 ipythonconsole = "IPython.lib.lexers:IPythonConsoleLexer"
40 ipython = "IPython.lib.lexers:IPythonLexer"
40 ipython = "IPython.lib.lexers:IPythonLexer"
41 ipython3 = "IPython.lib.lexers:IPython3Lexer"
41 ipython3 = "IPython.lib.lexers:IPython3Lexer"
42
42
43 [project.scripts]
43 [project.scripts]
44 ipython = "IPython:start_ipython"
44 ipython = "IPython:start_ipython"
45 ipython3 = "IPython:start_ipython"
45 ipython3 = "IPython:start_ipython"
46
46
47 [project.readme]
47 [project.readme]
48 file = "long_description.rst"
48 file = "long_description.rst"
49 content-type = "text/x-rst"
49 content-type = "text/x-rst"
50
50
51 [project.urls]
51 [project.urls]
52 Homepage = "https://ipython.org"
52 Homepage = "https://ipython.org"
53 Documentation = "https://ipython.readthedocs.io/"
53 Documentation = "https://ipython.readthedocs.io/"
54 Funding = "https://numfocus.org/"
54 Funding = "https://numfocus.org/"
55 Source = "https://github.com/ipython/ipython"
55 Source = "https://github.com/ipython/ipython"
56 Tracker = "https://github.com/ipython/ipython/issues"
56 Tracker = "https://github.com/ipython/ipython/issues"

[project.optional-dependencies]
black = [
"black",
]
doc = [
"docrepr",
"exceptiongroup",
"intersphinx_registry",
"ipykernel",
"ipython[test]",
"matplotlib",
"setuptools>=18.5",
"sphinx-rtd-theme",
"sphinx>=1.3",
"sphinxcontrib-jquery",
"typing_extensions",
]
kernel = [
"ipykernel",
]
nbconvert = [
"nbconvert",
]
nbformat = [
"nbformat",
]
notebook = [
"ipywidgets",
"notebook",
]
parallel = [
"ipyparallel",
]
qtconsole = [
"qtconsole",
]
terminal = []
test = [
"pytest",
"pytest-asyncio<0.22",
"testpath",
"pickleshare",
"packaging",
]
test_extra = [
"ipython[test]",
"curio",
"matplotlib!=3.2.0",
"nbformat",
"numpy>=1.23",
"pandas",
"trio",
]
matplotlib = [
"matplotlib"
]
all = [
"ipython[black,doc,kernel,nbconvert,nbformat,notebook,parallel,qtconsole,matplotlib]",
"ipython[test,test_extra]",
]
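
The "all" extra simply aggregates the other extras, so a single ``pip install "ipython[all]"`` pulls in the documentation, kernel, notebook, and test dependencies at once. Once installed, the declared extras and their markers can be inspected from Python; a small sketch using the standard importlib.metadata module:

.. code::

    from importlib.metadata import metadata, requires

    md = metadata("ipython")
    print(md.get_all("Provides-Extra"))   # lists extras such as 'doc', 'test', 'all'

    # Each requirement string carries an environment marker naming its extra:
    print([r for r in requires("ipython") if 'extra == "doc"' in r])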

[tool.mypy]
python_version = "3.10"
ignore_missing_imports = true
follow_imports = 'silent'
exclude = [
'test_\.+\.py',
'IPython.utils.tests.test_wildcard',
'testing',
'tests',
'PyColorize.py',
'_process_win32_controller.py',
'IPython/core/application.py',
'IPython/core/profileapp.py',
'IPython/lib/deepreload.py',
'IPython/sphinxext/ipython_directive.py',
'IPython/terminal/ipapp.py',
'IPython/utils/_process_win32.py',
'IPython/utils/path.py',
]
# check_untyped_defs = true
# disallow_untyped_calls = true
# disallow_untyped_decorators = true
# ignore_errors = false
# ignore_missing_imports = false
disallow_incomplete_defs = true
disallow_untyped_defs = true
warn_redundant_casts = true
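
With disallow_untyped_defs and disallow_incomplete_defs enabled globally, every function in a checked module (outside the excluded paths and the relaxed per-module overrides below) must be fully annotated. An illustrative sketch, not taken from the IPython code base; the error text is mypy's standard message:

.. code::

    def bad(x):              # error: Function is missing a type annotation
        return x + 1

    def good(x: int) -> int:  # accepted: parameters and return type are annotated
        return x + 1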

[[tool.mypy.overrides]]
module = [
"IPython.core.crashhandler",
]
check_untyped_defs = true
disallow_incomplete_defs = true
disallow_untyped_calls = true
disallow_untyped_decorators = true
disallow_untyped_defs = true
ignore_errors = false
ignore_missing_imports = false

[[tool.mypy.overrides]]
module = [
"IPython.utils.text",
]
disallow_untyped_defs = true
check_untyped_defs = false
disallow_untyped_decorators = true

[[tool.mypy.overrides]]
module = [
]
disallow_untyped_defs = false
ignore_errors = true
ignore_missing_imports = true
disallow_untyped_calls = false
disallow_incomplete_defs = false
check_untyped_defs = false
disallow_untyped_decorators = false

# global ignore error
[[tool.mypy.overrides]]
module = [
"IPython",
"IPython.conftest",
"IPython.core.alias",
"IPython.core.async_helpers",
"IPython.core.autocall",
"IPython.core.builtin_trap",
"IPython.core.compilerop",
"IPython.core.completer",
"IPython.core.completerlib",
"IPython.core.debugger",
"IPython.core.display",
"IPython.core.display_functions",
"IPython.core.display_trap",
"IPython.core.displayhook",
"IPython.core.displaypub",
"IPython.core.events",
"IPython.core.excolors",
"IPython.core.extensions",
"IPython.core.formatters",
"IPython.core.getipython",
"IPython.core.guarded_eval",
"IPython.core.history",
"IPython.core.historyapp",
"IPython.core.hooks",
"IPython.core.inputsplitter",
"IPython.core.inputtransformer",
"IPython.core.inputtransformer2",
"IPython.core.interactiveshell",
"IPython.core.logger",
"IPython.core.macro",
"IPython.core.magic",
"IPython.core.magic_arguments",
"IPython.core.magics.ast_mod",
"IPython.core.magics.auto",
"IPython.core.magics.basic",
"IPython.core.magics.code",
"IPython.core.magics.config",
"IPython.core.magics.display",
"IPython.core.magics.execution",
"IPython.core.magics.extension",
"IPython.core.magics.history",
"IPython.core.magics.logging",
"IPython.core.magics.namespace",
"IPython.core.magics.osm",
"IPython.core.magics.packaging",
"IPython.core.magics.pylab",
"IPython.core.magics.script",
"IPython.core.oinspect",
"IPython.core.page",
"IPython.core.payload",
"IPython.core.payloadpage",
"IPython.core.prefilter",
"IPython.core.profiledir",
"IPython.core.prompts",
"IPython.core.pylabtools",
"IPython.core.shellapp",
"IPython.core.splitinput",
"IPython.core.ultratb",
"IPython.extensions.autoreload",
"IPython.extensions.storemagic",
"IPython.external.qt_for_kernel",
"IPython.external.qt_loaders",
"IPython.lib.backgroundjobs",
"IPython.lib.clipboard",
"IPython.lib.demo",
"IPython.lib.display",
"IPython.lib.editorhooks",
"IPython.lib.guisupport",
"IPython.lib.latextools",
"IPython.lib.lexers",
"IPython.lib.pretty",
"IPython.paths",
"IPython.sphinxext.ipython_console_highlighting",
"IPython.terminal.debugger",
"IPython.terminal.embed",
"IPython.terminal.interactiveshell",
"IPython.terminal.magics",
"IPython.terminal.prompts",
"IPython.terminal.pt_inputhooks",
"IPython.terminal.pt_inputhooks.asyncio",
"IPython.terminal.pt_inputhooks.glut",
"IPython.terminal.pt_inputhooks.gtk",
"IPython.terminal.pt_inputhooks.gtk3",
"IPython.terminal.pt_inputhooks.gtk4",
"IPython.terminal.pt_inputhooks.osx",
"IPython.terminal.pt_inputhooks.pyglet",
"IPython.terminal.pt_inputhooks.qt",
"IPython.terminal.pt_inputhooks.tk",
"IPython.terminal.pt_inputhooks.wx",
"IPython.terminal.ptutils",
"IPython.terminal.shortcuts",
"IPython.terminal.shortcuts.auto_match",
"IPython.terminal.shortcuts.auto_suggest",
"IPython.terminal.shortcuts.filters",
"IPython.utils._process_cli",
"IPython.utils._process_common",
"IPython.utils._process_emscripten",
"IPython.utils._process_posix",
"IPython.utils.capture",
"IPython.utils.coloransi",
"IPython.utils.contexts",
"IPython.utils.data",
"IPython.utils.decorators",
"IPython.utils.dir2",
"IPython.utils.encoding",
"IPython.utils.frame",
"IPython.utils.generics",
"IPython.utils.importstring",
"IPython.utils.io",
"IPython.utils.ipstruct",
"IPython.utils.module_paths",
"IPython.utils.openpy",
"IPython.utils.process",
"IPython.utils.py3compat",
"IPython.utils.sentinel",
"IPython.utils.shimmodule",
"IPython.utils.strdispatch",
"IPython.utils.sysinfo",
"IPython.utils.syspathcontext",
"IPython.utils.tempdir",
"IPython.utils.terminal",
"IPython.utils.timing",
"IPython.utils.tokenutil",
"IPython.utils.tz",
"IPython.utils.ulinecache",
"IPython.utils.version",
"IPython.utils.wildcard",
]
disallow_untyped_defs = false
ignore_errors = true
ignore_missing_imports = true
disallow_untyped_calls = false
disallow_incomplete_defs = false
check_untyped_defs = false
disallow_untyped_decorators = false

[tool.pytest.ini_options]
addopts = [
"--durations=10",
"-pIPython.testing.plugin.pytest_ipdoctest",
"--ipdoctest-modules",
"--ignore=docs",
"--ignore=examples",
"--ignore=htmlcov",
"--ignore=ipython_kernel",
"--ignore=ipython_parallel",
"--ignore=results",
"--ignore=tmp",
"--ignore=tools",
"--ignore=traitlets",
"--ignore=IPython/core/tests/daft_extension",
"--ignore=IPython/sphinxext",
"--ignore=IPython/terminal/pt_inputhooks",
"--ignore=IPython/__main__.py",
"--ignore=IPython/external/qt_for_kernel.py",
"--ignore=IPython/html/widgets/widget_link.py",
"--ignore=IPython/html/widgets/widget_output.py",
"--ignore=IPython/terminal/console.py",
"--ignore=IPython/utils/_process_cli.py",
"--ignore=IPython/utils/_process_posix.py",
"--ignore=IPython/utils/_process_win32.py",
"--ignore=IPython/utils/_process_win32_controller.py",
"--ignore=IPython/utils/daemonize.py",
"--ignore=IPython/utils/eventful.py",
"--ignore=IPython/kernel",
"--ignore=IPython/consoleapp.py",
"--ignore=IPython/core/inputsplitter.py",
"--ignore=IPython/lib/kernel.py",
"--ignore=IPython/utils/jsonutil.py",
"--ignore=IPython/utils/localinterfaces.py",
"--ignore=IPython/utils/log.py",
"--ignore=IPython/utils/signatures.py",
"--ignore=IPython/utils/traitlets.py",
"--ignore=IPython/utils/version.py"
]
doctest_optionflags = [
"NORMALIZE_WHITESPACE",
"ELLIPSIS"
]
ipdoctest_optionflags = [
"NORMALIZE_WHITESPACE",
"ELLIPSIS"
]
asyncio_mode = "strict"
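
NORMALIZE_WHITESPACE and ELLIPSIS apply to every doctest and ipdoctest collected through the pytest_ipdoctest plugin, so examples can ignore incidental spacing and elide long output. A tiny illustrative docstring; the mean() helper is hypothetical and not part of IPython:

.. code::

    def mean(values):
        """Arithmetic mean of *values*.

        >>> mean([1, 2, 3, 4])
        2.5
        >>> mean(range(1000))   # ELLIPSIS lets "499..." match "499.5"
        499...
        """
        values = list(values)
        return sum(values) / len(values)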

[tool.pyright]
pythonPlatform="All"

[tool.setuptools]
zip-safe = false
platforms = ["Linux", "Mac OSX", "Windows"]
license-files = ["LICENSE"]
include-package-data = false

[tool.setuptools.packages.find]
exclude = ["setupext"]
namespaces = false

[tool.setuptools.package-data]
"IPython" = ["py.typed"]
"IPython.core" = ["profile/README*"]
"IPython.core.tests" = ["*.png", "*.jpg", "daft_extension/*.py"]
"IPython.lib.tests" = ["*.wav"]
"IPython.testing.plugin" = ["*.txt"]

[tool.setuptools.dynamic]
version = {attr = "IPython.core.release.__version__"}
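
The dynamic "version" field is resolved at build time from the attribute named above, and the same string is importable at runtime. A minimal check; the value shown in the comment is only a placeholder:

.. code::

    from IPython.core.release import __version__

    print(__version__)  # e.g. "8.x.y"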

[tool.coverage.run]
omit = [
# omit everything in /tmp as we run tempfile
"/tmp/*",
]