Merge pull request #12395 from cphyc/feature/tuple-completion
Matthias Bussonnier - r25854:fdd32759 merge
@@ -1,2218 +1,2262 b''
1 """Completion for IPython.
1 """Completion for IPython.
2
2
3 This module started as fork of the rlcompleter module in the Python standard
3 This module started as fork of the rlcompleter module in the Python standard
4 library. The original enhancements made to rlcompleter have been sent
4 library. The original enhancements made to rlcompleter have been sent
5 upstream and were accepted as of Python 2.3,
5 upstream and were accepted as of Python 2.3,
6
6
7 This module now support a wide variety of completion mechanism both available
7 This module now support a wide variety of completion mechanism both available
8 for normal classic Python code, as well as completer for IPython specific
8 for normal classic Python code, as well as completer for IPython specific
9 Syntax like magics.
9 Syntax like magics.
10
10
11 Latex and Unicode completion
11 Latex and Unicode completion
12 ============================
12 ============================
13
13
14 IPython and compatible frontends not only can complete your code, but can help
14 IPython and compatible frontends not only can complete your code, but can help
15 you to input a wide range of characters. In particular we allow you to insert
15 you to input a wide range of characters. In particular we allow you to insert
16 a unicode character using the tab completion mechanism.
16 a unicode character using the tab completion mechanism.
17
17
18 Forward latex/unicode completion
18 Forward latex/unicode completion
19 --------------------------------
19 --------------------------------
20
20
21 Forward completion allows you to easily type a unicode character using its latex
21 Forward completion allows you to easily type a unicode character using its latex
22 name, or unicode long description. To do so type a backslash follow by the
22 name, or unicode long description. To do so type a backslash follow by the
23 relevant name and press tab:
23 relevant name and press tab:
24
24
25
25
26 Using latex completion:
26 Using latex completion:
27
27
28 .. code::
28 .. code::
29
29
30 \\alpha<tab>
30 \\alpha<tab>
31 Ξ±
31 Ξ±
32
32
33 or using unicode completion:
33 or using unicode completion:
34
34
35
35
36 .. code::
36 .. code::
37
37
38 \\GREEK SMALL LETTER ALPHA<tab>
38 \\GREEK SMALL LETTER ALPHA<tab>
39 Ξ±
39 Ξ±
40
40
41
41
42 Only valid Python identifiers will complete. Combining characters (like arrow or
42 Only valid Python identifiers will complete. Combining characters (like arrow or
43 dots) are also available, unlike latex they need to be put after the their
43 dots) are also available, unlike latex they need to be put after the their
44 counterpart that is to say, `F\\\\vec<tab>` is correct, not `\\\\vec<tab>F`.
44 counterpart that is to say, `F\\\\vec<tab>` is correct, not `\\\\vec<tab>F`.
45
45
46 Some browsers are known to display combining characters incorrectly.
46 Some browsers are known to display combining characters incorrectly.
47
47
48 Backward latex completion
48 Backward latex completion
49 -------------------------
49 -------------------------
50
50
51 It is sometime challenging to know how to type a character, if you are using
51 It is sometime challenging to know how to type a character, if you are using
52 IPython, or any compatible frontend you can prepend backslash to the character
52 IPython, or any compatible frontend you can prepend backslash to the character
53 and press `<tab>` to expand it to its latex form.
53 and press `<tab>` to expand it to its latex form.
54
54
55 .. code::
55 .. code::
56
56
57 \\Ξ±<tab>
57 \\Ξ±<tab>
58 \\alpha
58 \\alpha
59
59
60
60
61 Both forward and backward completions can be deactivated by setting the
61 Both forward and backward completions can be deactivated by setting the
62 ``Completer.backslash_combining_completions`` option to ``False``.
62 ``Completer.backslash_combining_completions`` option to ``False``.
63
63
64
64
65 Experimental
65 Experimental
66 ============
66 ============
67
67
68 Starting with IPython 6.0, this module can make use of the Jedi library to
68 Starting with IPython 6.0, this module can make use of the Jedi library to
69 generate completions both using static analysis of the code, and dynamically
69 generate completions both using static analysis of the code, and dynamically
70 inspecting multiple namespaces. Jedi is an autocompletion and static analysis
70 inspecting multiple namespaces. Jedi is an autocompletion and static analysis
71 for Python. The APIs attached to this new mechanism is unstable and will
71 for Python. The APIs attached to this new mechanism is unstable and will
72 raise unless use in an :any:`provisionalcompleter` context manager.
72 raise unless use in an :any:`provisionalcompleter` context manager.
73
73
74 You will find that the following are experimental:
74 You will find that the following are experimental:
75
75
76 - :any:`provisionalcompleter`
76 - :any:`provisionalcompleter`
77 - :any:`IPCompleter.completions`
77 - :any:`IPCompleter.completions`
78 - :any:`Completion`
78 - :any:`Completion`
79 - :any:`rectify_completions`
79 - :any:`rectify_completions`
80
80
81 .. note::
81 .. note::
82
82
83 better name for :any:`rectify_completions` ?
83 better name for :any:`rectify_completions` ?
84
84
85 We welcome any feedback on these new API, and we also encourage you to try this
85 We welcome any feedback on these new API, and we also encourage you to try this
86 module in debug mode (start IPython with ``--Completer.debug=True``) in order
86 module in debug mode (start IPython with ``--Completer.debug=True``) in order
87 to have extra logging information if :any:`jedi` is crashing, or if current
87 to have extra logging information if :any:`jedi` is crashing, or if current
88 IPython completer pending deprecations are returning results not yet handled
88 IPython completer pending deprecations are returning results not yet handled
89 by :any:`jedi`
89 by :any:`jedi`
90
90
91 Using Jedi for tab completion allow snippets like the following to work without
91 Using Jedi for tab completion allow snippets like the following to work without
92 having to execute any code:
92 having to execute any code:
93
93
94 >>> myvar = ['hello', 42]
94 >>> myvar = ['hello', 42]
95 ... myvar[1].bi<tab>
95 ... myvar[1].bi<tab>
96
96
97 Tab completion will be able to infer that ``myvar[1]`` is a real number without
97 Tab completion will be able to infer that ``myvar[1]`` is a real number without
98 executing any code unlike the previously available ``IPCompleter.greedy``
98 executing any code unlike the previously available ``IPCompleter.greedy``
99 option.
99 option.
100
100
101 Be sure to update :any:`jedi` to the latest stable version or to try the
101 Be sure to update :any:`jedi` to the latest stable version or to try the
102 current development version to get better completions.
102 current development version to get better completions.
103 """
103 """
104
104
105
105
106 # Copyright (c) IPython Development Team.
106 # Copyright (c) IPython Development Team.
107 # Distributed under the terms of the Modified BSD License.
107 # Distributed under the terms of the Modified BSD License.
108 #
108 #
109 # Some of this code originated from rlcompleter in the Python standard library
109 # Some of this code originated from rlcompleter in the Python standard library
110 # Copyright (C) 2001 Python Software Foundation, www.python.org
110 # Copyright (C) 2001 Python Software Foundation, www.python.org
111
111
112
112
113 import builtins as builtin_mod
113 import builtins as builtin_mod
114 import glob
114 import glob
115 import inspect
115 import inspect
116 import itertools
116 import itertools
117 import keyword
117 import keyword
118 import os
118 import os
119 import re
119 import re
120 import string
120 import string
121 import sys
121 import sys
122 import time
122 import time
123 import unicodedata
123 import unicodedata
124 import uuid
124 import uuid
125 import warnings
125 import warnings
126 from contextlib import contextmanager
126 from contextlib import contextmanager
127 from importlib import import_module
127 from importlib import import_module
128 from types import SimpleNamespace
128 from types import SimpleNamespace
129 from typing import Iterable, Iterator, List, Tuple, Union, Any, Sequence, Dict, NamedTuple, Pattern, Optional
129 from typing import Iterable, Iterator, List, Tuple, Union, Any, Sequence, Dict, NamedTuple, Pattern, Optional
130
130
131 from IPython.core.error import TryNext
131 from IPython.core.error import TryNext
132 from IPython.core.inputtransformer2 import ESC_MAGIC
132 from IPython.core.inputtransformer2 import ESC_MAGIC
133 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
133 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
134 from IPython.core.oinspect import InspectColors
134 from IPython.core.oinspect import InspectColors
135 from IPython.utils import generics
135 from IPython.utils import generics
136 from IPython.utils.dir2 import dir2, get_real_method
136 from IPython.utils.dir2 import dir2, get_real_method
137 from IPython.utils.path import ensure_dir_exists
137 from IPython.utils.path import ensure_dir_exists
138 from IPython.utils.process import arg_split
138 from IPython.utils.process import arg_split
139 from traitlets import Bool, Enum, Int, List as ListTrait, Unicode, default, observe
139 from traitlets import Bool, Enum, Int, List as ListTrait, Unicode, default, observe
140 from traitlets.config.configurable import Configurable
140 from traitlets.config.configurable import Configurable
141
141
142 import __main__
142 import __main__
143
143
144 # skip module doctests
145 skip_doctest = True
145 skip_doctest = True
146
146
147 try:
147 try:
148 import jedi
148 import jedi
149 jedi.settings.case_insensitive_completion = False
149 jedi.settings.case_insensitive_completion = False
150 import jedi.api.helpers
150 import jedi.api.helpers
151 import jedi.api.classes
151 import jedi.api.classes
152 JEDI_INSTALLED = True
152 JEDI_INSTALLED = True
153 except ImportError:
153 except ImportError:
154 JEDI_INSTALLED = False
154 JEDI_INSTALLED = False
155 #-----------------------------------------------------------------------------
155 #-----------------------------------------------------------------------------
156 # Globals
156 # Globals
157 #-----------------------------------------------------------------------------
157 #-----------------------------------------------------------------------------
158
158
159 # Ranges where we have most of the valid unicode names. We could be more finely
160 # grained, but is it worth it for performance? While unicode has characters in the
161 # range 0, 0x110000, we seem to have names for only about 10% of those (131808 as I
162 # write this). With the ranges below we cover them all, with a density of ~67%.
163 # The biggest next gap we considered only adds about 1% density and there are 600
164 # gaps that would need hard coding.
165 _UNICODE_RANGES = [(32, 0x3134b), (0xe0001, 0xe01f0)]
165 _UNICODE_RANGES = [(32, 0x3134b), (0xe0001, 0xe01f0)]
166
166
167 # Public API
167 # Public API
168 __all__ = ['Completer','IPCompleter']
168 __all__ = ['Completer','IPCompleter']
169
169
170 if sys.platform == 'win32':
170 if sys.platform == 'win32':
171 PROTECTABLES = ' '
171 PROTECTABLES = ' '
172 else:
172 else:
173 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
173 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
174
174
175 # Protect against returning an enormous number of completions which the frontend
175 # Protect against returning an enormous number of completions which the frontend
176 # may have trouble processing.
176 # may have trouble processing.
177 MATCHES_LIMIT = 500
177 MATCHES_LIMIT = 500
178
178
179 _deprecation_readline_sentinel = object()
179 _deprecation_readline_sentinel = object()
180
180
181
181
182 class ProvisionalCompleterWarning(FutureWarning):
182 class ProvisionalCompleterWarning(FutureWarning):
183 """
183 """
184 Exception raised by an experimental feature in this module.
185
186 Wrap code in a :any:`provisionalcompleter` context manager if you
187 are certain you want to use an unstable feature.
188 """
188 """
189 pass
189 pass
190
190
191 warnings.filterwarnings('error', category=ProvisionalCompleterWarning)
191 warnings.filterwarnings('error', category=ProvisionalCompleterWarning)
192
192
193 @contextmanager
193 @contextmanager
194 def provisionalcompleter(action='ignore'):
194 def provisionalcompleter(action='ignore'):
195 """
195 """
196
196
197
197
198 This context manager has to be used in any place where unstable completer
198 This context manager has to be used in any place where unstable completer
199 behavior and API may be called.
199 behavior and API may be called.
200
200
201 >>> with provisionalcompleter():
201 >>> with provisionalcompleter():
202 ... completer.do_experimental_things() # works
202 ... completer.do_experimental_things() # works
203
203
204 >>> completer.do_experimental_things() # raises.
204 >>> completer.do_experimental_things() # raises.
205
205
206 .. note:: Unstable
206 .. note:: Unstable
207
207
208 By using this context manager you agree that the APIs in use may change
209 without warning, and that you won't complain if they do so.
210
210
211 You also understand that, if the API is not to your liking, you should report
211 You also understand that, if the API is not to your liking, you should report
212 a bug to explain your use case upstream.
212 a bug to explain your use case upstream.
213
213
214 We'll be happy to get your feedback, feature requests, and improvements on
214 We'll be happy to get your feedback, feature requests, and improvements on
215 any of the unstable APIs!
215 any of the unstable APIs!
216 """
216 """
217 with warnings.catch_warnings():
217 with warnings.catch_warnings():
218 warnings.filterwarnings(action, category=ProvisionalCompleterWarning)
218 warnings.filterwarnings(action, category=ProvisionalCompleterWarning)
219 yield
219 yield
220
220
221
221
222 def has_open_quotes(s):
222 def has_open_quotes(s):
223 """Return whether a string has open quotes.
223 """Return whether a string has open quotes.
224
224
225 This simply checks whether the number of quote characters of either type in
226 the string is odd.
227
227
228 Returns
228 Returns
229 -------
229 -------
230 If there is an open quote, the quote character is returned. Else, return
230 If there is an open quote, the quote character is returned. Else, return
231 False.
231 False.
232 """
232 """
233 # We check " first, then ', so complex cases with nested quotes will get
233 # We check " first, then ', so complex cases with nested quotes will get
234 # the " to take precedence.
234 # the " to take precedence.
235 if s.count('"') % 2:
235 if s.count('"') % 2:
236 return '"'
236 return '"'
237 elif s.count("'") % 2:
237 elif s.count("'") % 2:
238 return "'"
238 return "'"
239 else:
239 else:
240 return False
240 return False
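# Illustrative sketch (editor's addition, not part of the original source): the
# expected behaviour of has_open_quotes on a few hypothetical inputs.
assert has_open_quotes('print("hello') == '"'
assert has_open_quotes("name = 'abc") == "'"
assert has_open_quotes('"closed"') is False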
241
241
242
242
243 def protect_filename(s, protectables=PROTECTABLES):
243 def protect_filename(s, protectables=PROTECTABLES):
244 """Escape a string to protect certain characters."""
244 """Escape a string to protect certain characters."""
245 if set(s) & set(protectables):
245 if set(s) & set(protectables):
246 if sys.platform == "win32":
246 if sys.platform == "win32":
247 return '"' + s + '"'
247 return '"' + s + '"'
248 else:
248 else:
249 return "".join(("\\" + c if c in protectables else c) for c in s)
249 return "".join(("\\" + c if c in protectables else c) for c in s)
250 else:
250 else:
251 return s
251 return s
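# Illustrative sketch (editor's addition, not part of the original source): on
# POSIX every character listed in PROTECTABLES is backslash-escaped, while on
# Windows the whole name is simply wrapped in double quotes.
if sys.platform != "win32":
    assert protect_filename("my file(1).txt") == "my\\ file\\(1\\).txt"
else:
    assert protect_filename("my file(1).txt") == '"my file(1).txt"'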
252
252
253
253
254 def expand_user(path:str) -> Tuple[str, bool, str]:
254 def expand_user(path:str) -> Tuple[str, bool, str]:
255 """Expand ``~``-style usernames in strings.
255 """Expand ``~``-style usernames in strings.
256
256
257 This is similar to :func:`os.path.expanduser`, but it computes and returns
257 This is similar to :func:`os.path.expanduser`, but it computes and returns
258 extra information that will be useful if the input was being used in
258 extra information that will be useful if the input was being used in
259 computing completions, and you wish to return the completions with the
259 computing completions, and you wish to return the completions with the
260 original '~' instead of its expanded value.
260 original '~' instead of its expanded value.
261
261
262 Parameters
262 Parameters
263 ----------
263 ----------
264 path : str
264 path : str
265 String to be expanded. If no ~ is present, the output is the same as the
265 String to be expanded. If no ~ is present, the output is the same as the
266 input.
266 input.
267
267
268 Returns
268 Returns
269 -------
269 -------
270 newpath : str
270 newpath : str
271 Result of ~ expansion in the input path.
271 Result of ~ expansion in the input path.
272 tilde_expand : bool
272 tilde_expand : bool
273 Whether any expansion was performed or not.
273 Whether any expansion was performed or not.
274 tilde_val : str
274 tilde_val : str
275 The value that ~ was replaced with.
275 The value that ~ was replaced with.
276 """
276 """
277 # Default values
277 # Default values
278 tilde_expand = False
278 tilde_expand = False
279 tilde_val = ''
279 tilde_val = ''
280 newpath = path
280 newpath = path
281
281
282 if path.startswith('~'):
282 if path.startswith('~'):
283 tilde_expand = True
283 tilde_expand = True
284 rest = len(path)-1
284 rest = len(path)-1
285 newpath = os.path.expanduser(path)
285 newpath = os.path.expanduser(path)
286 if rest:
286 if rest:
287 tilde_val = newpath[:-rest]
287 tilde_val = newpath[:-rest]
288 else:
288 else:
289 tilde_val = newpath
289 tilde_val = newpath
290
290
291 return newpath, tilde_expand, tilde_val
291 return newpath, tilde_expand, tilde_val
292
292
293
293
294 def compress_user(path:str, tilde_expand:bool, tilde_val:str) -> str:
294 def compress_user(path:str, tilde_expand:bool, tilde_val:str) -> str:
295 """Does the opposite of expand_user, with its outputs.
295 """Does the opposite of expand_user, with its outputs.
296 """
296 """
297 if tilde_expand:
297 if tilde_expand:
298 return path.replace(tilde_val, '~')
298 return path.replace(tilde_val, '~')
299 else:
299 else:
300 return path
300 return path
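# Illustrative sketch (editor's addition, not part of the original source):
# without a leading ``~`` the input passes through unchanged; with one, the
# (environment-dependent) expansion can be undone with compress_user.
assert expand_user('notebooks/demo.ipynb') == ('notebooks/demo.ipynb', False, '')
_newpath, _expanded, _val = expand_user('~/notebooks')
assert compress_user(_newpath, _expanded, _val) == '~/notebooks'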
301
301
302
302
303 def completions_sorting_key(word):
303 def completions_sorting_key(word):
304 """key for sorting completions
304 """key for sorting completions
305
305
306 This does several things:
306 This does several things:
307
307
308 - Demote any completions starting with underscores to the end
308 - Demote any completions starting with underscores to the end
309 - Insert any %magic and %%cellmagic completions in alphabetical order by
310 their name
311 """
311 """
312 prio1, prio2 = 0, 0
312 prio1, prio2 = 0, 0
313
313
314 if word.startswith('__'):
314 if word.startswith('__'):
315 prio1 = 2
315 prio1 = 2
316 elif word.startswith('_'):
316 elif word.startswith('_'):
317 prio1 = 1
317 prio1 = 1
318
318
319 if word.endswith('='):
319 if word.endswith('='):
320 prio1 = -1
320 prio1 = -1
321
321
322 if word.startswith('%%'):
322 if word.startswith('%%'):
323 # If there's another % in there, this is something else, so leave it alone
323 # If there's another % in there, this is something else, so leave it alone
324 if not "%" in word[2:]:
324 if not "%" in word[2:]:
325 word = word[2:]
325 word = word[2:]
326 prio2 = 2
326 prio2 = 2
327 elif word.startswith('%'):
327 elif word.startswith('%'):
328 if not "%" in word[1:]:
328 if not "%" in word[1:]:
329 word = word[1:]
329 word = word[1:]
330 prio2 = 1
330 prio2 = 1
331
331
332 return prio1, word, prio2
332 return prio1, word, prio2
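# Illustrative sketch (editor's addition, not part of the original source):
# magics sort by their bare name and dunder names sink to the end.
assert sorted(["_private", "%%timeit", "apple", "__dunder"],
              key=completions_sorting_key) == ["apple", "%%timeit", "_private", "__dunder"]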
333
333
334
334
335 class _FakeJediCompletion:
335 class _FakeJediCompletion:
336 """
336 """
337 This is a workaround to communicate to the UI that Jedi has crashed and to
337 This is a workaround to communicate to the UI that Jedi has crashed and to
338 report a bug. Will be used only if :any:`IPCompleter.debug` is set to true.
339
339
340 Added in IPython 6.0 so should likely be removed for 7.0
340 Added in IPython 6.0 so should likely be removed for 7.0
341
341
342 """
342 """
343
343
344 def __init__(self, name):
344 def __init__(self, name):
345
345
346 self.name = name
346 self.name = name
347 self.complete = name
347 self.complete = name
348 self.type = 'crashed'
348 self.type = 'crashed'
349 self.name_with_symbols = name
349 self.name_with_symbols = name
350 self.signature = ''
350 self.signature = ''
351 self._origin = 'fake'
351 self._origin = 'fake'
352
352
353 def __repr__(self):
353 def __repr__(self):
354 return '<Fake completion object jedi has crashed>'
354 return '<Fake completion object jedi has crashed>'
355
355
356
356
357 class Completion:
357 class Completion:
358 """
358 """
359 Completion object used and return by IPython completers.
359 Completion object used and return by IPython completers.
360
360
361 .. warning:: Unstable
361 .. warning:: Unstable
362
362
363 This function is unstable, API may change without warning.
363 This function is unstable, API may change without warning.
364 It will also raise unless use in proper context manager.
364 It will also raise unless use in proper context manager.
365
365
366 This act as a middle ground :any:`Completion` object between the
366 This act as a middle ground :any:`Completion` object between the
367 :any:`jedi.api.classes.Completion` object and the Prompt Toolkit completion
367 :any:`jedi.api.classes.Completion` object and the Prompt Toolkit completion
368 object. While Jedi need a lot of information about evaluator and how the
368 object. While Jedi need a lot of information about evaluator and how the
369 code should be ran/inspected, PromptToolkit (and other frontend) mostly
369 code should be ran/inspected, PromptToolkit (and other frontend) mostly
370 need user facing information.
370 need user facing information.
371
371
372 - Which range should be replaced replaced by what.
372 - Which range should be replaced replaced by what.
373 - Some metadata (like completion type), or meta information to displayed to
373 - Some metadata (like completion type), or meta information to displayed to
374 the use user.
374 the use user.
375
375
376 For debugging purpose we can also store the origin of the completion (``jedi``,
376 For debugging purpose we can also store the origin of the completion (``jedi``,
377 ``IPython.python_matches``, ``IPython.magics_matches``...).
377 ``IPython.python_matches``, ``IPython.magics_matches``...).
378 """
378 """
379
379
380 __slots__ = ['start', 'end', 'text', 'type', 'signature', '_origin']
380 __slots__ = ['start', 'end', 'text', 'type', 'signature', '_origin']
381
381
382 def __init__(self, start: int, end: int, text: str, *, type: str=None, _origin='', signature='') -> None:
382 def __init__(self, start: int, end: int, text: str, *, type: str=None, _origin='', signature='') -> None:
383 warnings.warn("``Completion`` is a provisional API (as of IPython 6.0). "
383 warnings.warn("``Completion`` is a provisional API (as of IPython 6.0). "
384 "It may change without warnings. "
384 "It may change without warnings. "
385 "Use in corresponding context manager.",
385 "Use in corresponding context manager.",
386 category=ProvisionalCompleterWarning, stacklevel=2)
386 category=ProvisionalCompleterWarning, stacklevel=2)
387
387
388 self.start = start
388 self.start = start
389 self.end = end
389 self.end = end
390 self.text = text
390 self.text = text
391 self.type = type
391 self.type = type
392 self.signature = signature
392 self.signature = signature
393 self._origin = _origin
393 self._origin = _origin
394
394
395 def __repr__(self):
395 def __repr__(self):
396 return '<Completion start=%s end=%s text=%r type=%r, signature=%r,>' % \
396 return '<Completion start=%s end=%s text=%r type=%r, signature=%r,>' % \
397 (self.start, self.end, self.text, self.type or '?', self.signature or '?')
397 (self.start, self.end, self.text, self.type or '?', self.signature or '?')
398
398
399 def __eq__(self, other) -> bool:
400 """
401 Equality and hash do not hash the type (as some completers may not be
402 able to infer the type), but are used to (partially) de-duplicate
403 completions.
404
405 Completely de-duplicating completions is a bit trickier than just
406 comparing, as it depends on the surrounding text, which Completions are
407 not aware of.
408 """
409 return self.start == other.start and \
409 return self.start == other.start and \
410 self.end == other.end and \
410 self.end == other.end and \
411 self.text == other.text
411 self.text == other.text
412
412
413 def __hash__(self):
413 def __hash__(self):
414 return hash((self.start, self.end, self.text))
414 return hash((self.start, self.end, self.text))
415
415
416
416
417 _IC = Iterable[Completion]
417 _IC = Iterable[Completion]
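# Illustrative sketch (editor's addition, not part of the original source):
# equality and hashing only consider (start, end, text), so the same
# replacement reported by two different matchers de-duplicates; the
# provisional context manager silences the ProvisionalCompleterWarning.
with provisionalcompleter():
    _a = Completion(0, 3, 'abs', type='function', _origin='jedi')
    _b = Completion(0, 3, 'abs', _origin='IPCompleter.python_matches')
    assert _a == _b and len({_a, _b}) == 1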
418
418
419
419
420 def _deduplicate_completions(text: str, completions: _IC)-> _IC:
420 def _deduplicate_completions(text: str, completions: _IC)-> _IC:
421 """
421 """
422 Deduplicate a set of completions.
422 Deduplicate a set of completions.
423
423
424 .. warning:: Unstable
424 .. warning:: Unstable
425
425
426 This function is unstable, API may change without warning.
426 This function is unstable, API may change without warning.
427
427
428 Parameters
428 Parameters
429 ----------
429 ----------
430 text: str
430 text: str
431 text that should be completed.
431 text that should be completed.
432 completions: Iterator[Completion]
432 completions: Iterator[Completion]
433 iterator over the completions to deduplicate
433 iterator over the completions to deduplicate
434
434
435 Yields
435 Yields
436 ------
436 ------
437 `Completions` objects
437 `Completions` objects
438
438
439
439
440 Completions coming from multiple sources may be different but end up having
441 the same effect when applied to ``text``. If this is the case, this will
442 consider the completions as equal and only emit the first one encountered.
443
444 Not folded into `completions()` yet for debugging purposes, and to detect when
445 the IPython completer returns things that Jedi does not, but it should be
446 at some point.
447 """
447 """
448 completions = list(completions)
448 completions = list(completions)
449 if not completions:
449 if not completions:
450 return
450 return
451
451
452 new_start = min(c.start for c in completions)
452 new_start = min(c.start for c in completions)
453 new_end = max(c.end for c in completions)
453 new_end = max(c.end for c in completions)
454
454
455 seen = set()
455 seen = set()
456 for c in completions:
456 for c in completions:
457 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
457 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
458 if new_text not in seen:
458 if new_text not in seen:
459 yield c
459 yield c
460 seen.add(new_text)
460 seen.add(new_text)
461
461
462
462
463 def rectify_completions(text: str, completions: _IC, *, _debug=False)->_IC:
463 def rectify_completions(text: str, completions: _IC, *, _debug=False)->_IC:
464 """
464 """
465 Rectify a set of completions to all have the same ``start`` and ``end``
465 Rectify a set of completions to all have the same ``start`` and ``end``
466
466
467 .. warning:: Unstable
467 .. warning:: Unstable
468
468
469 This function is unstable, API may change without warning.
469 This function is unstable, API may change without warning.
470 It will also raise unless used in the proper context manager.
471
471
472 Parameters
472 Parameters
473 ----------
473 ----------
474 text: str
474 text: str
475 text that should be completed.
475 text that should be completed.
476 completions: Iterator[Completion]
476 completions: Iterator[Completion]
477 iterator over the completions to rectify
477 iterator over the completions to rectify
478
478
479
479
480 :any:`jedi.api.classes.Completion` s returned by Jedi may not have the same start and end, though
481 the Jupyter Protocol requires them to behave that way. This will readjust
482 the completions to have the same ``start`` and ``end`` by padding both
483 extremities with surrounding text.
484
485 During stabilisation, this should support a ``_debug`` option to log which
486 completions are returned by the IPython completer but not found in Jedi, in
487 order to make upstream bug reports.
488 """
488 """
489 warnings.warn("`rectify_completions` is a provisional API (as of IPython 6.0). "
489 warnings.warn("`rectify_completions` is a provisional API (as of IPython 6.0). "
490 "It may change without warnings. "
490 "It may change without warnings. "
491 "Use in corresponding context manager.",
491 "Use in corresponding context manager.",
492 category=ProvisionalCompleterWarning, stacklevel=2)
492 category=ProvisionalCompleterWarning, stacklevel=2)
493
493
494 completions = list(completions)
494 completions = list(completions)
495 if not completions:
495 if not completions:
496 return
496 return
497 starts = (c.start for c in completions)
497 starts = (c.start for c in completions)
498 ends = (c.end for c in completions)
498 ends = (c.end for c in completions)
499
499
500 new_start = min(starts)
500 new_start = min(starts)
501 new_end = max(ends)
501 new_end = max(ends)
502
502
503 seen_jedi = set()
503 seen_jedi = set()
504 seen_python_matches = set()
504 seen_python_matches = set()
505 for c in completions:
505 for c in completions:
506 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
506 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
507 if c._origin == 'jedi':
507 if c._origin == 'jedi':
508 seen_jedi.add(new_text)
508 seen_jedi.add(new_text)
509 elif c._origin == 'IPCompleter.python_matches':
509 elif c._origin == 'IPCompleter.python_matches':
510 seen_python_matches.add(new_text)
510 seen_python_matches.add(new_text)
511 yield Completion(new_start, new_end, new_text, type=c.type, _origin=c._origin, signature=c.signature)
511 yield Completion(new_start, new_end, new_text, type=c.type, _origin=c._origin, signature=c.signature)
512 diff = seen_python_matches.difference(seen_jedi)
512 diff = seen_python_matches.difference(seen_jedi)
513 if diff and _debug:
513 if diff and _debug:
514 print('IPython.python matches have extras:', diff)
514 print('IPython.python matches have extras:', diff)
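# Illustrative sketch (editor's addition, not part of the original source):
# completions with different replacement ranges over the text 'a.is' are
# rectified to share the widest range; the values are hypothetical.
with provisionalcompleter():
    _rectified = list(rectify_completions(
        'a.is', [Completion(2, 4, 'isalpha'), Completion(0, 4, 'a.isdigit')]))
    assert [(c.start, c.end, c.text) for c in _rectified] == [
        (0, 4, 'a.isalpha'), (0, 4, 'a.isdigit')]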
515
515
516
516
517 if sys.platform == 'win32':
517 if sys.platform == 'win32':
518 DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
518 DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
519 else:
519 else:
520 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
520 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
521
521
522 GREEDY_DELIMS = ' =\r\n'
522 GREEDY_DELIMS = ' =\r\n'
523
523
524
524
525 class CompletionSplitter(object):
525 class CompletionSplitter(object):
526 """An object to split an input line in a manner similar to readline.
526 """An object to split an input line in a manner similar to readline.
527
527
528 By having our own implementation, we can expose readline-like completion in
528 By having our own implementation, we can expose readline-like completion in
529 a uniform manner to all frontends. This object only needs to be given the
529 a uniform manner to all frontends. This object only needs to be given the
530 line of text to be split and the cursor position on said line, and it
530 line of text to be split and the cursor position on said line, and it
531 returns the 'word' to be completed on at the cursor after splitting the
531 returns the 'word' to be completed on at the cursor after splitting the
532 entire line.
532 entire line.
533
533
534 What characters are used as splitting delimiters can be controlled by
534 What characters are used as splitting delimiters can be controlled by
535 setting the ``delims`` attribute (this is a property that internally
535 setting the ``delims`` attribute (this is a property that internally
536 automatically builds the necessary regular expression)"""
536 automatically builds the necessary regular expression)"""
537
537
538 # Private interface
538 # Private interface
539
539
540 # A string of delimiter characters. The default value makes sense for
540 # A string of delimiter characters. The default value makes sense for
541 # IPython's most typical usage patterns.
541 # IPython's most typical usage patterns.
542 _delims = DELIMS
542 _delims = DELIMS
543
543
544 # The expression (a normal string) to be compiled into a regular expression
544 # The expression (a normal string) to be compiled into a regular expression
545 # for actual splitting. We store it as an attribute mostly for ease of
545 # for actual splitting. We store it as an attribute mostly for ease of
546 # debugging, since this type of code can be so tricky to debug.
546 # debugging, since this type of code can be so tricky to debug.
547 _delim_expr = None
547 _delim_expr = None
548
548
549 # The regular expression that does the actual splitting
549 # The regular expression that does the actual splitting
550 _delim_re = None
550 _delim_re = None
551
551
552 def __init__(self, delims=None):
552 def __init__(self, delims=None):
553 delims = CompletionSplitter._delims if delims is None else delims
553 delims = CompletionSplitter._delims if delims is None else delims
554 self.delims = delims
554 self.delims = delims
555
555
556 @property
556 @property
557 def delims(self):
557 def delims(self):
558 """Return the string of delimiter characters."""
558 """Return the string of delimiter characters."""
559 return self._delims
559 return self._delims
560
560
561 @delims.setter
561 @delims.setter
562 def delims(self, delims):
562 def delims(self, delims):
563 """Set the delimiters for line splitting."""
563 """Set the delimiters for line splitting."""
564 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
564 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
565 self._delim_re = re.compile(expr)
565 self._delim_re = re.compile(expr)
566 self._delims = delims
566 self._delims = delims
567 self._delim_expr = expr
567 self._delim_expr = expr
568
568
569 def split_line(self, line, cursor_pos=None):
569 def split_line(self, line, cursor_pos=None):
570 """Split a line of text with a cursor at the given position.
570 """Split a line of text with a cursor at the given position.
571 """
571 """
572 l = line if cursor_pos is None else line[:cursor_pos]
572 l = line if cursor_pos is None else line[:cursor_pos]
573 return self._delim_re.split(l)[-1]
573 return self._delim_re.split(l)[-1]
574
574
575
575
576
576
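# Illustrative sketch (editor's addition, not part of the original source): the
# splitter returns the token under the cursor after cutting the line at the
# delimiter characters (note that '.' is not a delimiter).
_splitter = CompletionSplitter()
assert _splitter.split_line('run foo.bar(baz', 15) == 'baz'
assert _splitter.split_line('print(obj.attr') == 'obj.attr'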
577 class Completer(Configurable):
577 class Completer(Configurable):
578
578
579 greedy = Bool(False,
579 greedy = Bool(False,
580 help="""Activate greedy completion
580 help="""Activate greedy completion
581 PENDING DEPRECATION. This is now mostly taken care of with Jedi.
582
582
583 This will enable completion on elements of lists, results of function calls, etc.,
583 This will enable completion on elements of lists, results of function calls, etc.,
584 but can be unsafe because the code is actually evaluated on TAB.
584 but can be unsafe because the code is actually evaluated on TAB.
585 """
585 """
586 ).tag(config=True)
586 ).tag(config=True)
587
587
588 use_jedi = Bool(default_value=JEDI_INSTALLED,
588 use_jedi = Bool(default_value=JEDI_INSTALLED,
589 help="Experimental: Use Jedi to generate autocompletions. "
589 help="Experimental: Use Jedi to generate autocompletions. "
590 "Default to True if jedi is installed.").tag(config=True)
590 "Default to True if jedi is installed.").tag(config=True)
591
591
592 jedi_compute_type_timeout = Int(default_value=400,
592 jedi_compute_type_timeout = Int(default_value=400,
593 help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
593 help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
594 Set to 0 to stop computing types. Non-zero values lower than 100ms may hurt
595 performance by preventing jedi from building its cache.
596 """).tag(config=True)
596 """).tag(config=True)
597
597
598 debug = Bool(default_value=False,
598 debug = Bool(default_value=False,
599 help='Enable debug for the Completer. Mostly print extra '
599 help='Enable debug for the Completer. Mostly print extra '
600 'information for experimental jedi integration.')\
600 'information for experimental jedi integration.')\
601 .tag(config=True)
601 .tag(config=True)
602
602
603 backslash_combining_completions = Bool(True,
603 backslash_combining_completions = Bool(True,
604 help="Enable unicode completions, e.g. \\alpha<tab> . "
604 help="Enable unicode completions, e.g. \\alpha<tab> . "
605 "Includes completion of latex commands, unicode names, and expanding "
605 "Includes completion of latex commands, unicode names, and expanding "
606 "unicode characters back to latex commands.").tag(config=True)
606 "unicode characters back to latex commands.").tag(config=True)
607
607
608
608
609
609
610 def __init__(self, namespace=None, global_namespace=None, **kwargs):
610 def __init__(self, namespace=None, global_namespace=None, **kwargs):
611 """Create a new completer for the command line.
611 """Create a new completer for the command line.
612
612
613 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
613 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
614
614
615 If unspecified, the default namespace where completions are performed
615 If unspecified, the default namespace where completions are performed
616 is __main__ (technically, __main__.__dict__). Namespaces should be
616 is __main__ (technically, __main__.__dict__). Namespaces should be
617 given as dictionaries.
617 given as dictionaries.
618
618
619 An optional second namespace can be given. This allows the completer
619 An optional second namespace can be given. This allows the completer
620 to handle cases where both the local and global scopes need to be
620 to handle cases where both the local and global scopes need to be
621 distinguished.
621 distinguished.
622 """
622 """
623
623
624 # Don't bind to namespace quite yet, but flag whether the user wants a
624 # Don't bind to namespace quite yet, but flag whether the user wants a
625 # specific namespace or to use __main__.__dict__. This will allow us
625 # specific namespace or to use __main__.__dict__. This will allow us
626 # to bind to __main__.__dict__ at completion time, not now.
626 # to bind to __main__.__dict__ at completion time, not now.
627 if namespace is None:
627 if namespace is None:
628 self.use_main_ns = True
628 self.use_main_ns = True
629 else:
629 else:
630 self.use_main_ns = False
630 self.use_main_ns = False
631 self.namespace = namespace
631 self.namespace = namespace
632
632
633 # The global namespace, if given, can be bound directly
633 # The global namespace, if given, can be bound directly
634 if global_namespace is None:
634 if global_namespace is None:
635 self.global_namespace = {}
635 self.global_namespace = {}
636 else:
636 else:
637 self.global_namespace = global_namespace
637 self.global_namespace = global_namespace
638
638
639 self.custom_matchers = []
639 self.custom_matchers = []
640
640
641 super(Completer, self).__init__(**kwargs)
641 super(Completer, self).__init__(**kwargs)
642
642
643 def complete(self, text, state):
643 def complete(self, text, state):
644 """Return the next possible completion for 'text'.
644 """Return the next possible completion for 'text'.
645
645
646 This is called successively with state == 0, 1, 2, ... until it
646 This is called successively with state == 0, 1, 2, ... until it
647 returns None. The completion should begin with 'text'.
647 returns None. The completion should begin with 'text'.
648
648
649 """
649 """
650 if self.use_main_ns:
650 if self.use_main_ns:
651 self.namespace = __main__.__dict__
651 self.namespace = __main__.__dict__
652
652
653 if state == 0:
653 if state == 0:
654 if "." in text:
654 if "." in text:
655 self.matches = self.attr_matches(text)
655 self.matches = self.attr_matches(text)
656 else:
656 else:
657 self.matches = self.global_matches(text)
657 self.matches = self.global_matches(text)
658 try:
658 try:
659 return self.matches[state]
659 return self.matches[state]
660 except IndexError:
660 except IndexError:
661 return None
661 return None
662
662
663 def global_matches(self, text):
663 def global_matches(self, text):
664 """Compute matches when text is a simple name.
664 """Compute matches when text is a simple name.
665
665
666 Return a list of all keywords, built-in functions and names currently
666 Return a list of all keywords, built-in functions and names currently
667 defined in self.namespace or self.global_namespace that match.
667 defined in self.namespace or self.global_namespace that match.
668
668
669 """
669 """
670 matches = []
670 matches = []
671 match_append = matches.append
671 match_append = matches.append
672 n = len(text)
672 n = len(text)
673 for lst in [keyword.kwlist,
673 for lst in [keyword.kwlist,
674 builtin_mod.__dict__.keys(),
674 builtin_mod.__dict__.keys(),
675 self.namespace.keys(),
675 self.namespace.keys(),
676 self.global_namespace.keys()]:
676 self.global_namespace.keys()]:
677 for word in lst:
677 for word in lst:
678 if word[:n] == text and word != "__builtins__":
678 if word[:n] == text and word != "__builtins__":
679 match_append(word)
679 match_append(word)
680
680
681 snake_case_re = re.compile(r"[^_]+(_[^_]+)+?\Z")
681 snake_case_re = re.compile(r"[^_]+(_[^_]+)+?\Z")
682 for lst in [self.namespace.keys(),
682 for lst in [self.namespace.keys(),
683 self.global_namespace.keys()]:
683 self.global_namespace.keys()]:
684 shortened = {"_".join([sub[0] for sub in word.split('_')]) : word
684 shortened = {"_".join([sub[0] for sub in word.split('_')]) : word
685 for word in lst if snake_case_re.match(word)}
685 for word in lst if snake_case_re.match(word)}
686 for word in shortened.keys():
686 for word in shortened.keys():
687 if word[:n] == text and word != "__builtins__":
687 if word[:n] == text and word != "__builtins__":
688 match_append(shortened[word])
688 match_append(shortened[word])
689 return matches
689 return matches
690
690
691 def attr_matches(self, text):
691 def attr_matches(self, text):
692 """Compute matches when text contains a dot.
692 """Compute matches when text contains a dot.
693
693
694 Assuming the text is of the form NAME.NAME....[NAME], and is
694 Assuming the text is of the form NAME.NAME....[NAME], and is
695 evaluatable in self.namespace or self.global_namespace, it will be
695 evaluatable in self.namespace or self.global_namespace, it will be
696 evaluated and its attributes (as revealed by dir()) are used as
696 evaluated and its attributes (as revealed by dir()) are used as
697 possible completions. (For class instances, class members are
697 possible completions. (For class instances, class members are
698 also considered.)
698 also considered.)
699
699
700 WARNING: this can still invoke arbitrary C code, if an object
700 WARNING: this can still invoke arbitrary C code, if an object
701 with a __getattr__ hook is evaluated.
701 with a __getattr__ hook is evaluated.
702
702
703 """
703 """
704
704
705 # Another option, seems to work great. Catches things like ''.<tab>
705 # Another option, seems to work great. Catches things like ''.<tab>
706 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
706 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
707
707
708 if m:
708 if m:
709 expr, attr = m.group(1, 3)
709 expr, attr = m.group(1, 3)
710 elif self.greedy:
710 elif self.greedy:
711 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
711 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
712 if not m2:
712 if not m2:
713 return []
713 return []
714 expr, attr = m2.group(1,2)
714 expr, attr = m2.group(1,2)
715 else:
715 else:
716 return []
716 return []
717
717
718 try:
718 try:
719 obj = eval(expr, self.namespace)
719 obj = eval(expr, self.namespace)
720 except:
720 except:
721 try:
721 try:
722 obj = eval(expr, self.global_namespace)
722 obj = eval(expr, self.global_namespace)
723 except:
723 except:
724 return []
724 return []
725
725
726 if self.limit_to__all__ and hasattr(obj, '__all__'):
726 if self.limit_to__all__ and hasattr(obj, '__all__'):
727 words = get__all__entries(obj)
727 words = get__all__entries(obj)
728 else:
728 else:
729 words = dir2(obj)
729 words = dir2(obj)
730
730
731 try:
731 try:
732 words = generics.complete_object(obj, words)
732 words = generics.complete_object(obj, words)
733 except TryNext:
733 except TryNext:
734 pass
734 pass
735 except AssertionError:
735 except AssertionError:
736 raise
736 raise
737 except Exception:
737 except Exception:
738 # Silence errors from completion function
738 # Silence errors from completion function
739 #raise # dbg
739 #raise # dbg
740 pass
740 pass
741 # Build match list to return
741 # Build match list to return
742 n = len(attr)
742 n = len(attr)
743 return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr ]
743 return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr ]
744
744
745
745
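# Illustrative sketch (editor's addition, not part of the original source):
# matches come from keywords, builtins and the supplied namespaces, and the
# abbreviated snake_case form ('d_f' for 'data_frame') also completes.
_completer = Completer(namespace={'data_frame': 1, 'dataset': 2})
assert 'dataset' in _completer.global_matches('data')
assert 'data_frame' in _completer.global_matches('d_f')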
746 def get__all__entries(obj):
746 def get__all__entries(obj):
747 """returns the strings in the __all__ attribute"""
747 """returns the strings in the __all__ attribute"""
748 try:
748 try:
749 words = getattr(obj, '__all__')
749 words = getattr(obj, '__all__')
750 except:
750 except:
751 return []
751 return []
752
752
753 return [w for w in words if isinstance(w, str)]
753 return [w for w in words if isinstance(w, str)]
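# Illustrative sketch (editor's addition, not part of the original source):
# non-string entries of __all__ are silently dropped.
assert get__all__entries(SimpleNamespace(__all__=['foo', 123, 'bar'])) == ['foo', 'bar']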
754
754
755
755
756 def match_dict_keys(keys: List[Union[str, bytes]], prefix: str, delims: str) -> Tuple[str, int, List[str]]:
756 def match_dict_keys(keys: List[Union[str, bytes, Tuple[Union[str, bytes]]]], prefix: str, delims: str,
757 extra_prefix: Optional[Tuple[str, bytes]]=None) -> Tuple[str, int, List[str]]:
757 """Used by dict_key_matches, matching the prefix to a list of keys
758 """Used by dict_key_matches, matching the prefix to a list of keys
758
759
759 Parameters
760 Parameters
760 ==========
761 ==========
761 keys:
762 keys:
762 list of keys in dictionary currently being completed.
763 list of keys in dictionary currently being completed.
763 prefix:
764 prefix:
764 Part of the text already typed by the user. e.g. `mydict[b'fo`
765 Part of the text already typed by the user. E.g. `mydict[b'fo`
765 delims:
766 delims:
766 String of delimiters to consider when finding the current key.
767 String of delimiters to consider when finding the current key.
768 extra_prefix: optional
769 Part of the text already typed in multi-key index cases. E.g. for
770 `mydict['foo', "bar", 'b`, this would be `('foo', 'bar')`.
767
771
768 Returns
772 Returns
769 =======
773 =======
770
774
771 A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
775 A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
772 ``quote`` being the quote that needs to be used to close the current string,
776 ``quote`` being the quote that needs to be used to close the current string,
773 ``token_start`` the position where the replacement should start occurring, and
777 ``token_start`` the position where the replacement should start occurring, and
774 ``matched`` a list of replacement/completion strings.
778 ``matched`` a list of replacement/completion strings.
775
779
776 """
780 """
777 keys = [k for k in keys if isinstance(k, (str, bytes))]
781 prefix_tuple = extra_prefix if extra_prefix else ()
782 Nprefix = len(prefix_tuple)
783 def filter_prefix_tuple(key):
784 # Reject too short keys
785 if len(key) <= Nprefix:
786 return False
788 # Reject keys with non-str/bytes elements in them
788 for k in key:
789 if not isinstance(k, (str, bytes)):
790 return False
791 # Reject keys that do not match the prefix
792 for k, pt in zip(key, prefix_tuple):
793 if k != pt:
794 return False
795 # All checks passed!
796 return True
797
798 filtered_keys:List[Union[str,bytes]] = []
799 def _add_to_filtered_keys(key):
800 if isinstance(key, (str, bytes)):
801 filtered_keys.append(key)
802
803 for k in keys:
804 if isinstance(k, tuple):
805 if filter_prefix_tuple(k):
806 _add_to_filtered_keys(k[Nprefix])
807 else:
808 _add_to_filtered_keys(k)
809
778 if not prefix:
810 if not prefix:
779 return '', 0, [repr(k) for k in keys
811 return '', 0, [repr(k) for k in filtered_keys]
780 if isinstance(k, (str, bytes))]
781 quote_match = re.search('["\']', prefix)
812 quote_match = re.search('["\']', prefix)
782 assert quote_match is not None # silence mypy
813 assert quote_match is not None # silence mypy
783 quote = quote_match.group()
814 quote = quote_match.group()
784 try:
815 try:
785 prefix_str = eval(prefix + quote, {})
816 prefix_str = eval(prefix + quote, {})
786 except Exception:
817 except Exception:
787 return '', 0, []
818 return '', 0, []
788
819
789 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
820 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
790 token_match = re.search(pattern, prefix, re.UNICODE)
821 token_match = re.search(pattern, prefix, re.UNICODE)
791 assert token_match is not None # silence mypy
822 assert token_match is not None # silence mypy
792 token_start = token_match.start()
823 token_start = token_match.start()
793 token_prefix = token_match.group()
824 token_prefix = token_match.group()
794
825
795 matched:List[str] = []
826 matched:List[str] = []
796 for key in keys:
827 for key in filtered_keys:
797 try:
828 try:
798 if not key.startswith(prefix_str):
829 if not key.startswith(prefix_str):
799 continue
830 continue
800 except (AttributeError, TypeError, UnicodeError):
831 except (AttributeError, TypeError, UnicodeError):
801 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
832 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
802 continue
833 continue
803
834
804 # reformat remainder of key to begin with prefix
835 # reformat remainder of key to begin with prefix
805 rem = key[len(prefix_str):]
836 rem = key[len(prefix_str):]
806 # force repr wrapped in '
837 # force repr wrapped in '
807 rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
838 rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
808 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
839 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
809 if quote == '"':
840 if quote == '"':
810 # The entered prefix is quoted with ",
841 # The entered prefix is quoted with ",
811 # but the match is quoted with '.
842 # but the match is quoted with '.
812 # A contained " hence needs escaping for comparison:
843 # A contained " hence needs escaping for comparison:
813 rem_repr = rem_repr.replace('"', '\\"')
844 rem_repr = rem_repr.replace('"', '\\"')
814
845
815 # then reinsert prefix from start of token
846 # then reinsert prefix from start of token
816 matched.append('%s%s' % (token_prefix, rem_repr))
847 matched.append('%s%s' % (token_prefix, rem_repr))
817 return quote, token_start, matched
848 return quote, token_start, matched
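# Illustrative sketch (editor's addition, not part of the original diff): with
# the new ``extra_prefix`` argument, tuple keys are filtered on the components
# already typed and only the next component is offered; the keys are hypothetical.
_keys = [('foo', 'bar'), ('foo', 'baz'), ('qux', 'quux')]
assert match_dict_keys(_keys, "'ba", DELIMS, extra_prefix=('foo',)) == ("'", 1, ['bar', 'baz'])
assert match_dict_keys(['abc', 'abd'], '"a', DELIMS) == ('"', 1, ['abc', 'abd'])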
818
849
819
850
820 def cursor_to_position(text:str, line:int, column:int)->int:
851 def cursor_to_position(text:str, line:int, column:int)->int:
821 """
852 """
822
853
823 Convert the (line,column) position of the cursor in text to an offset in a
854 Convert the (line,column) position of the cursor in text to an offset in a
824 string.
855 string.
825
856
826 Parameters
857 Parameters
827 ----------
858 ----------
828
859
829 text : str
860 text : str
830 The text in which to calculate the cursor offset
861 The text in which to calculate the cursor offset
831 line : int
862 line : int
832 Line of the cursor; 0-indexed
863 Line of the cursor; 0-indexed
833 column : int
864 column : int
834 Column of the cursor 0-indexed
865 Column of the cursor 0-indexed
835
866
836 Return
867 Return
837 ------
868 ------
838 Position of the cursor in ``text``, 0-indexed.
869 Position of the cursor in ``text``, 0-indexed.
839
870
840 See Also
871 See Also
841 --------
872 --------
842 position_to_cursor: reciprocal of this function
873 position_to_cursor: reciprocal of this function
843
874
844 """
875 """
845 lines = text.split('\n')
876 lines = text.split('\n')
846 assert line <= len(lines), '{} <= {}'.format(str(line), str(len(lines)))
877 assert line <= len(lines), '{} <= {}'.format(str(line), str(len(lines)))
847
878
848 return sum(len(l) + 1 for l in lines[:line]) + column
879 return sum(len(l) + 1 for l in lines[:line]) + column
849
880
850 def position_to_cursor(text:str, offset:int)->Tuple[int, int]:
881 def position_to_cursor(text:str, offset:int)->Tuple[int, int]:
851 """
882 """
852 Convert the position of the cursor in text (0 indexed) to a line
883 Convert the position of the cursor in text (0 indexed) to a line
853 number (0-indexed) and a column number (0-indexed) pair.
884 number (0-indexed) and a column number (0-indexed) pair.
854
885
855 Position should be a valid position in ``text``.
886 Position should be a valid position in ``text``.
856
887
857 Parameters
888 Parameters
858 ----------
889 ----------
859
890
860 text : str
891 text : str
861 The text in which to calculate the cursor offset
892 The text in which to calculate the cursor offset
862 offset : int
893 offset : int
863 Position of the cursor in ``text``, 0-indexed.
894 Position of the cursor in ``text``, 0-indexed.
864
895
865 Return
896 Return
866 ------
897 ------
867 (line, column) : (int, int)
898 (line, column) : (int, int)
868 Line of the cursor; 0-indexed, column of the cursor 0-indexed
899 Line of the cursor; 0-indexed, column of the cursor 0-indexed
869
900
870
901
871 See Also
902 See Also
872 --------
903 --------
873 cursor_to_position : reciprocal of this function
904 cursor_to_position : reciprocal of this function
874
905
875
906
876 """
907 """
877
908
878 assert 0 <= offset <= len(text) , "0 <= %s <= %s" % (offset , len(text))
909 assert 0 <= offset <= len(text) , "0 <= %s <= %s" % (offset , len(text))
879
910
880 before = text[:offset]
911 before = text[:offset]
881 blines = before.split('\n') # ! splitlines trims the trailing \n
912 blines = before.split('\n') # ! splitlines trims the trailing \n
882 line = before.count('\n')
913 line = before.count('\n')
883 col = len(blines[-1])
914 col = len(blines[-1])
884 return line, col
915 return line, col
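
# A minimal sketch of the reciprocity with cursor_to_position documented in
# "See Also" above; the _demo_* helper is only an illustration.
def _demo_position_to_cursor_roundtrip():
    text = "ab\ncd\nef"
    for offset in range(len(text) + 1):
        line, col = position_to_cursor(text, offset)
        assert cursor_to_position(text, line, col) == offset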
885
916
886
917
887 def _safe_isinstance(obj, module, class_name):
918 def _safe_isinstance(obj, module, class_name):
888 """Checks if obj is an instance of module.class_name if loaded
919 """Checks if obj is an instance of module.class_name if loaded
889 """
920 """
890 return (module in sys.modules and
921 return (module in sys.modules and
891 isinstance(obj, getattr(import_module(module), class_name)))
922 isinstance(obj, getattr(import_module(module), class_name)))
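
# A minimal sketch: _safe_isinstance only consults modules that are already
# imported, so it never triggers an import of its own (illustration only).
def _demo_safe_isinstance():
    import collections  # ensure the module is present in sys.modules
    assert _safe_isinstance(collections.OrderedDict(), 'collections', 'OrderedDict')
    assert not _safe_isinstance({}, 'collections', 'OrderedDict')
    # an unloaded (or unknown) module short-circuits to False without importing
    assert not _safe_isinstance({}, 'not_an_imported_module', 'Anything')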
892
923
893 def back_unicode_name_matches(text:str) -> Tuple[str, Sequence[str]]:
924 def back_unicode_name_matches(text:str) -> Tuple[str, Sequence[str]]:
894 """Match Unicode characters back to Unicode name
925 """Match Unicode characters back to Unicode name
895
926
896 This does ``β˜ƒ`` -> ``\\snowman``
927 This does ``β˜ƒ`` -> ``\\snowman``
897
928
898 Note that snowman is not a valid Python 3 combining character, but it will still be expanded;
929 Note that snowman is not a valid Python 3 combining character, but it will still be expanded;
899 the completion machinery will not, however, recombine it back into the snowman character.
930 the completion machinery will not, however, recombine it back into the snowman character.
900
931
901 Nor will this back-complete standard escape sequences like \\n, \\b, etc.
932 Nor will this back-complete standard escape sequences like \\n, \\b, etc.
902
933
903 Returns
934 Returns
904 =======
935 =======
905
936
906 Return a tuple with two elements:
937 Return a tuple with two elements:
907
938
908 - The Unicode character that was matched (preceded by a backslash), or
939 - The Unicode character that was matched (preceded by a backslash), or
909 an empty string,
940 an empty string,
910 - a sequence (of length 1) containing the name of the matched Unicode character,
941 - a sequence (of length 1) containing the name of the matched Unicode character,
911 preceded by a backslash, or empty if there is no match.
942 preceded by a backslash, or empty if there is no match.
912
943
913 """
944 """
914 if len(text)<2:
945 if len(text)<2:
915 return '', ()
946 return '', ()
916 maybe_slash = text[-2]
947 maybe_slash = text[-2]
917 if maybe_slash != '\\':
948 if maybe_slash != '\\':
918 return '', ()
949 return '', ()
919
950
920 char = text[-1]
951 char = text[-1]
921 # do not expand a quote character, to keep completion inside strings usable,
952 # do not expand a quote character, to keep completion inside strings usable,
922 # nor back-complete standard ascii letters
953 # nor back-complete standard ascii letters
923 if char in string.ascii_letters or char in ('"',"'"):
954 if char in string.ascii_letters or char in ('"',"'"):
924 return '', ()
955 return '', ()
925 try :
956 try :
926 unic = unicodedata.name(char)
957 unic = unicodedata.name(char)
927 return '\\'+char,('\\'+unic,)
958 return '\\'+char,('\\'+unic,)
928 except KeyError:
959 except KeyError:
929 pass
960 pass
930 return '', ()
961 return '', ()
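
# A minimal sketch of the backward unicode lookup described above
# (illustration only; relies on the standard unicodedata module).
def _demo_back_unicode_name_matches():
    matched, names = back_unicode_name_matches('print("\\β˜ƒ')
    assert matched == '\\β˜ƒ'
    assert names == ('\\SNOWMAN',)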
931
962
932 def back_latex_name_matches(text:str) -> Tuple[str, Sequence[str]] :
963 def back_latex_name_matches(text:str) -> Tuple[str, Sequence[str]] :
933 """Match latex characters back to unicode name
964 """Match latex characters back to unicode name
934
965
935 This does ``\\β„΅`` -> ``\\aleph``
966 This does ``\\β„΅`` -> ``\\aleph``
936
967
937 """
968 """
938 if len(text)<2:
969 if len(text)<2:
939 return '', ()
970 return '', ()
940 maybe_slash = text[-2]
971 maybe_slash = text[-2]
941 if maybe_slash != '\\':
972 if maybe_slash != '\\':
942 return '', ()
973 return '', ()
943
974
944
975
945 char = text[-1]
976 char = text[-1]
946 # do not expand a quote character, to keep completion inside strings usable,
977 # do not expand a quote character, to keep completion inside strings usable,
947 # nor back-complete standard ascii letters
978 # nor back-complete standard ascii letters
948 if char in string.ascii_letters or char in ('"',"'"):
979 if char in string.ascii_letters or char in ('"',"'"):
949 return '', ()
980 return '', ()
950 try :
981 try :
951 latex = reverse_latex_symbol[char]
982 latex = reverse_latex_symbol[char]
952 # the leading '\\' is returned so that the completion replaces the \ as well
983 # the leading '\\' is returned so that the completion replaces the \ as well
953 return '\\'+char,[latex]
984 return '\\'+char,[latex]
954 except KeyError:
985 except KeyError:
955 pass
986 pass
956 return '', ()
987 return '', ()
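
# A minimal sketch, assuming the reverse_latex_symbol table used above maps
# β„΅ to '\\aleph' (illustration only).
def _demo_back_latex_name_matches():
    matched, names = back_latex_name_matches('x = "\\β„΅')
    assert matched == '\\β„΅'
    assert names == ['\\aleph']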
957
988
958
989
959 def _formatparamchildren(parameter) -> str:
990 def _formatparamchildren(parameter) -> str:
960 """
991 """
961 Get parameter name and value from Jedi Private API
992 Get parameter name and value from Jedi Private API
962
993
963 Jedi does not expose a simple way to get `param=value` from its API.
994 Jedi does not expose a simple way to get `param=value` from its API.
964
995
965 Parameter
996 Parameter
966 =========
997 =========
967
998
968 parameter:
999 parameter:
969 Jedi's function `Param`
1000 Jedi's function `Param`
970
1001
971 Returns
1002 Returns
972 =======
1003 =======
973
1004
974 A string like 'a', 'b=1', '*args', '**kwargs'
1005 A string like 'a', 'b=1', '*args', '**kwargs'
975
1006
976
1007
977 """
1008 """
978 description = parameter.description
1009 description = parameter.description
979 if not description.startswith('param '):
1010 if not description.startswith('param '):
980 raise ValueError('Jedi function parameter description has changed format. '
1011 raise ValueError('Jedi function parameter description has changed format. '
981 'Expected "param ...", found %r.' % description)
1012 'Expected "param ...", found %r.' % description)
982 return description[6:]
1013 return description[6:]
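
# A minimal sketch with a stand-in object: real callers pass a jedi Param
# whose description reads like 'param b=1' (illustration only).
def _demo_formatparamchildren():
    from types import SimpleNamespace
    fake_param = SimpleNamespace(description='param b=1')
    assert _formatparamchildren(fake_param) == 'b=1'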
983
1014
984 def _make_signature(completion)-> str:
1015 def _make_signature(completion)-> str:
985 """
1016 """
986 Make the signature from a jedi completion
1017 Make the signature from a jedi completion
987
1018
988 Parameter
1019 Parameter
989 =========
1020 =========
990
1021
991 completion: jedi.Completion
1022 completion: jedi.Completion
992 a completion object that resolves to a function-like (callable) type
1023 a completion object that resolves to a function-like (callable) type
993
1024
994 Returns
1025 Returns
995 =======
1026 =======
996
1027
997 a string consisting of the function signature, with the parentheses but
1028 a string consisting of the function signature, with the parentheses but
998 without the function name. For example:
1029 without the function name. For example:
999 `(a, *args, b=1, **kwargs)`
1030 `(a, *args, b=1, **kwargs)`
1000
1031
1001 """
1032 """
1002
1033
1003 # it looks like this might work on jedi 0.17
1034 # it looks like this might work on jedi 0.17
1004 if hasattr(completion, 'get_signatures'):
1035 if hasattr(completion, 'get_signatures'):
1005 signatures = completion.get_signatures()
1036 signatures = completion.get_signatures()
1006 if not signatures:
1037 if not signatures:
1007 return '(?)'
1038 return '(?)'
1008
1039
1009 c0 = completion.get_signatures()[0]
1040 c0 = completion.get_signatures()[0]
1010 return '('+c0.to_string().split('(', maxsplit=1)[1]
1041 return '('+c0.to_string().split('(', maxsplit=1)[1]
1011
1042
1012 return '(%s)'% ', '.join([f for f in (_formatparamchildren(p) for signature in completion.get_signatures()
1043 return '(%s)'% ', '.join([f for f in (_formatparamchildren(p) for signature in completion.get_signatures()
1013 for p in signature.defined_names()) if f])
1044 for p in signature.defined_names()) if f])
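
# A minimal sketch with stand-in objects mimicking the jedi >= 0.17 branch
# above; only the shape of get_signatures()/to_string() matters here.
def _demo_make_signature():
    class _FakeSignature:
        def to_string(self):
            return 'foo(a, *args, b=1, **kwargs)'
    class _FakeCompletion:
        def get_signatures(self):
            return [_FakeSignature()]
    assert _make_signature(_FakeCompletion()) == '(a, *args, b=1, **kwargs)'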
1014
1045
1015
1046
1016 class _CompleteResult(NamedTuple):
1047 class _CompleteResult(NamedTuple):
1017 matched_text : str
1048 matched_text : str
1018 matches: Sequence[str]
1049 matches: Sequence[str]
1019 matches_origin: Sequence[str]
1050 matches_origin: Sequence[str]
1020 jedi_matches: Any
1051 jedi_matches: Any
1021
1052
1022
1053
1023 class IPCompleter(Completer):
1054 class IPCompleter(Completer):
1024 """Extension of the completer class with IPython-specific features"""
1055 """Extension of the completer class with IPython-specific features"""
1025
1056
1026 __dict_key_regexps: Optional[Dict[bool,Pattern]] = None
1057 __dict_key_regexps: Optional[Dict[bool,Pattern]] = None
1027
1058
1028 @observe('greedy')
1059 @observe('greedy')
1029 def _greedy_changed(self, change):
1060 def _greedy_changed(self, change):
1030 """update the splitter and readline delims when greedy is changed"""
1061 """update the splitter and readline delims when greedy is changed"""
1031 if change['new']:
1062 if change['new']:
1032 self.splitter.delims = GREEDY_DELIMS
1063 self.splitter.delims = GREEDY_DELIMS
1033 else:
1064 else:
1034 self.splitter.delims = DELIMS
1065 self.splitter.delims = DELIMS
1035
1066
1036 dict_keys_only = Bool(False,
1067 dict_keys_only = Bool(False,
1037 help="""Whether to show dict key matches only""")
1068 help="""Whether to show dict key matches only""")
1038
1069
1039 merge_completions = Bool(True,
1070 merge_completions = Bool(True,
1040 help="""Whether to merge completion results into a single list
1071 help="""Whether to merge completion results into a single list
1041
1072
1042 If False, only the completion results from the first non-empty
1073 If False, only the completion results from the first non-empty
1043 completer will be returned.
1074 completer will be returned.
1044 """
1075 """
1045 ).tag(config=True)
1076 ).tag(config=True)
1046 omit__names = Enum((0,1,2), default_value=2,
1077 omit__names = Enum((0,1,2), default_value=2,
1047 help="""Instruct the completer to omit private method names
1078 help="""Instruct the completer to omit private method names
1048
1079
1049 Specifically, when completing on ``object.<tab>``.
1080 Specifically, when completing on ``object.<tab>``.
1050
1081
1051 When 2 [default]: all names that start with '_' will be excluded.
1082 When 2 [default]: all names that start with '_' will be excluded.
1052
1083
1053 When 1: all 'magic' names (``__foo__``) will be excluded.
1084 When 1: all 'magic' names (``__foo__``) will be excluded.
1054
1085
1055 When 0: nothing will be excluded.
1086 When 0: nothing will be excluded.
1056 """
1087 """
1057 ).tag(config=True)
1088 ).tag(config=True)
1058 limit_to__all__ = Bool(False,
1089 limit_to__all__ = Bool(False,
1059 help="""
1090 help="""
1060 DEPRECATED as of version 5.0.
1091 DEPRECATED as of version 5.0.
1061
1092
1062 Instruct the completer to use __all__ for the completion
1093 Instruct the completer to use __all__ for the completion
1063
1094
1064 Specifically, when completing on ``object.<tab>``.
1095 Specifically, when completing on ``object.<tab>``.
1065
1096
1066 When True: only those names in obj.__all__ will be included.
1097 When True: only those names in obj.__all__ will be included.
1067
1098
1068 When False [default]: the __all__ attribute is ignored
1099 When False [default]: the __all__ attribute is ignored
1069 """,
1100 """,
1070 ).tag(config=True)
1101 ).tag(config=True)
1071
1102
1072 profile_completions = Bool(
1103 profile_completions = Bool(
1073 default_value=False,
1104 default_value=False,
1074 help="If True, emit profiling data for completion subsystem using cProfile."
1105 help="If True, emit profiling data for completion subsystem using cProfile."
1075 ).tag(config=True)
1106 ).tag(config=True)
1076
1107
1077 profiler_output_dir = Unicode(
1108 profiler_output_dir = Unicode(
1078 default_value=".completion_profiles",
1109 default_value=".completion_profiles",
1079 help="Template for path at which to output profile data for completions."
1110 help="Template for path at which to output profile data for completions."
1080 ).tag(config=True)
1111 ).tag(config=True)
1081
1112
1082 @observe('limit_to__all__')
1113 @observe('limit_to__all__')
1083 def _limit_to_all_changed(self, change):
1114 def _limit_to_all_changed(self, change):
1084 warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
1115 warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
1085 'value has been deprecated since IPython 5.0, will be made to have '
1116 'value has been deprecated since IPython 5.0, will be made to have '
1086 'no effect and then removed in a future version of IPython.',
1117 'no effect and then removed in a future version of IPython.',
1087 UserWarning)
1118 UserWarning)
1088
1119
1089 def __init__(self, shell=None, namespace=None, global_namespace=None,
1120 def __init__(self, shell=None, namespace=None, global_namespace=None,
1090 use_readline=_deprecation_readline_sentinel, config=None, **kwargs):
1121 use_readline=_deprecation_readline_sentinel, config=None, **kwargs):
1091 """IPCompleter() -> completer
1122 """IPCompleter() -> completer
1092
1123
1093 Return a completer object.
1124 Return a completer object.
1094
1125
1095 Parameters
1126 Parameters
1096 ----------
1127 ----------
1097
1128
1098 shell
1129 shell
1099 a pointer to the ipython shell itself. This is needed
1130 a pointer to the ipython shell itself. This is needed
1100 because this completer knows about magic functions, and those can
1131 because this completer knows about magic functions, and those can
1101 only be accessed via the ipython instance.
1132 only be accessed via the ipython instance.
1102
1133
1103 namespace : dict, optional
1134 namespace : dict, optional
1104 an optional dict where completions are performed.
1135 an optional dict where completions are performed.
1105
1136
1106 global_namespace : dict, optional
1137 global_namespace : dict, optional
1107 secondary optional dict for completions, to
1138 secondary optional dict for completions, to
1108 handle cases (such as IPython embedded inside functions) where
1139 handle cases (such as IPython embedded inside functions) where
1109 both Python scopes are visible.
1140 both Python scopes are visible.
1110
1141
1111 use_readline : bool, optional
1142 use_readline : bool, optional
1112 DEPRECATED, ignored since IPython 6.0, will have no effects
1143 DEPRECATED, ignored since IPython 6.0, will have no effects
1113 """
1144 """
1114
1145
1115 self.magic_escape = ESC_MAGIC
1146 self.magic_escape = ESC_MAGIC
1116 self.splitter = CompletionSplitter()
1147 self.splitter = CompletionSplitter()
1117
1148
1118 if use_readline is not _deprecation_readline_sentinel:
1149 if use_readline is not _deprecation_readline_sentinel:
1119 warnings.warn('The `use_readline` parameter is deprecated and ignored since IPython 6.0.',
1150 warnings.warn('The `use_readline` parameter is deprecated and ignored since IPython 6.0.',
1120 DeprecationWarning, stacklevel=2)
1151 DeprecationWarning, stacklevel=2)
1121
1152
1122 # _greedy_changed() depends on splitter and readline being defined:
1153 # _greedy_changed() depends on splitter and readline being defined:
1123 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
1154 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
1124 config=config, **kwargs)
1155 config=config, **kwargs)
1125
1156
1126 # List where completion matches will be stored
1157 # List where completion matches will be stored
1127 self.matches = []
1158 self.matches = []
1128 self.shell = shell
1159 self.shell = shell
1129 # Regexp to split filenames with spaces in them
1160 # Regexp to split filenames with spaces in them
1130 self.space_name_re = re.compile(r'([^\\] )')
1161 self.space_name_re = re.compile(r'([^\\] )')
1131 # Hold a local ref. to glob.glob for speed
1162 # Hold a local ref. to glob.glob for speed
1132 self.glob = glob.glob
1163 self.glob = glob.glob
1133
1164
1134 # Determine if we are running on 'dumb' terminals, like (X)Emacs
1165 # Determine if we are running on 'dumb' terminals, like (X)Emacs
1135 # buffers, to avoid completion problems.
1166 # buffers, to avoid completion problems.
1136 term = os.environ.get('TERM','xterm')
1167 term = os.environ.get('TERM','xterm')
1137 self.dumb_terminal = term in ['dumb','emacs']
1168 self.dumb_terminal = term in ['dumb','emacs']
1138
1169
1139 # Special handling of backslashes needed in win32 platforms
1170 # Special handling of backslashes needed in win32 platforms
1140 if sys.platform == "win32":
1171 if sys.platform == "win32":
1141 self.clean_glob = self._clean_glob_win32
1172 self.clean_glob = self._clean_glob_win32
1142 else:
1173 else:
1143 self.clean_glob = self._clean_glob
1174 self.clean_glob = self._clean_glob
1144
1175
1145 #regexp to parse docstring for function signature
1176 #regexp to parse docstring for function signature
1146 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1177 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1147 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1178 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1148 #use this if positional argument name is also needed
1179 #use this if positional argument name is also needed
1149 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
1180 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
1150
1181
1151 self.magic_arg_matchers = [
1182 self.magic_arg_matchers = [
1152 self.magic_config_matches,
1183 self.magic_config_matches,
1153 self.magic_color_matches,
1184 self.magic_color_matches,
1154 ]
1185 ]
1155
1186
1156 # This is set externally by InteractiveShell
1187 # This is set externally by InteractiveShell
1157 self.custom_completers = None
1188 self.custom_completers = None
1158
1189
1159 # This is a list of names of unicode characters that can be completed
1190 # This is a list of names of unicode characters that can be completed
1160 # into their corresponding unicode value. The list is large, so we
1191 # into their corresponding unicode value. The list is large, so we
1161 # lazily initialize it on first use. Consuming code should access this
1192 # lazily initialize it on first use. Consuming code should access this
1162 # attribute through the `@unicode_names` property.
1193 # attribute through the `@unicode_names` property.
1163 self._unicode_names = None
1194 self._unicode_names = None
1164
1195
1165 @property
1196 @property
1166 def matchers(self) -> List[Any]:
1197 def matchers(self) -> List[Any]:
1167 """All active matcher routines for completion"""
1198 """All active matcher routines for completion"""
1168 if self.dict_keys_only:
1199 if self.dict_keys_only:
1169 return [self.dict_key_matches]
1200 return [self.dict_key_matches]
1170
1201
1171 if self.use_jedi:
1202 if self.use_jedi:
1172 return [
1203 return [
1173 *self.custom_matchers,
1204 *self.custom_matchers,
1174 self.file_matches,
1205 self.file_matches,
1175 self.magic_matches,
1206 self.magic_matches,
1176 self.dict_key_matches,
1207 self.dict_key_matches,
1177 ]
1208 ]
1178 else:
1209 else:
1179 return [
1210 return [
1180 *self.custom_matchers,
1211 *self.custom_matchers,
1181 self.python_matches,
1212 self.python_matches,
1182 self.file_matches,
1213 self.file_matches,
1183 self.magic_matches,
1214 self.magic_matches,
1184 self.python_func_kw_matches,
1215 self.python_func_kw_matches,
1185 self.dict_key_matches,
1216 self.dict_key_matches,
1186 ]
1217 ]
1187
1218
1188 def all_completions(self, text:str) -> List[str]:
1219 def all_completions(self, text:str) -> List[str]:
1189 """
1220 """
1190 Wrapper around the completion methods for the benefit of emacs.
1221 Wrapper around the completion methods for the benefit of emacs.
1191 """
1222 """
1192 prefix = text.rpartition('.')[0]
1223 prefix = text.rpartition('.')[0]
1193 with provisionalcompleter():
1224 with provisionalcompleter():
1194 return ['.'.join([prefix, c.text]) if prefix and self.use_jedi else c.text
1225 return ['.'.join([prefix, c.text]) if prefix and self.use_jedi else c.text
1195 for c in self.completions(text, len(text))]
1226 for c in self.completions(text, len(text))]
1196
1227
1197 return self.complete(text)[1]
1228 return self.complete(text)[1]
1198
1229
1199 def _clean_glob(self, text:str):
1230 def _clean_glob(self, text:str):
1200 return self.glob("%s*" % text)
1231 return self.glob("%s*" % text)
1201
1232
1202 def _clean_glob_win32(self, text:str):
1233 def _clean_glob_win32(self, text:str):
1203 return [f.replace("\\","/")
1234 return [f.replace("\\","/")
1204 for f in self.glob("%s*" % text)]
1235 for f in self.glob("%s*" % text)]
1205
1236
1206 def file_matches(self, text:str)->List[str]:
1237 def file_matches(self, text:str)->List[str]:
1207 """Match filenames, expanding ~USER type strings.
1238 """Match filenames, expanding ~USER type strings.
1208
1239
1209 Most of the seemingly convoluted logic in this completer is an
1240 Most of the seemingly convoluted logic in this completer is an
1210 attempt to handle filenames with spaces in them. And yet it's not
1241 attempt to handle filenames with spaces in them. And yet it's not
1211 quite perfect, because Python's readline doesn't expose all of the
1242 quite perfect, because Python's readline doesn't expose all of the
1212 GNU readline details needed for this to be done correctly.
1243 GNU readline details needed for this to be done correctly.
1213
1244
1214 For a filename with a space in it, the printed completions will be
1245 For a filename with a space in it, the printed completions will be
1215 only the parts after what's already been typed (instead of the
1246 only the parts after what's already been typed (instead of the
1216 full completions, as is normally done). I don't think with the
1247 full completions, as is normally done). I don't think with the
1217 current (as of Python 2.3) Python readline it's possible to do
1248 current (as of Python 2.3) Python readline it's possible to do
1218 better."""
1249 better."""
1219
1250
1220 # chars that require escaping with backslash - i.e. chars
1251 # chars that require escaping with backslash - i.e. chars
1221 # that readline treats incorrectly as delimiters, but we
1252 # that readline treats incorrectly as delimiters, but we
1222 # don't want to treat as delimiters in filename matching
1253 # don't want to treat as delimiters in filename matching
1223 # when escaped with backslash
1254 # when escaped with backslash
1224 if text.startswith('!'):
1255 if text.startswith('!'):
1225 text = text[1:]
1256 text = text[1:]
1226 text_prefix = u'!'
1257 text_prefix = u'!'
1227 else:
1258 else:
1228 text_prefix = u''
1259 text_prefix = u''
1229
1260
1230 text_until_cursor = self.text_until_cursor
1261 text_until_cursor = self.text_until_cursor
1231 # track strings with open quotes
1262 # track strings with open quotes
1232 open_quotes = has_open_quotes(text_until_cursor)
1263 open_quotes = has_open_quotes(text_until_cursor)
1233
1264
1234 if '(' in text_until_cursor or '[' in text_until_cursor:
1265 if '(' in text_until_cursor or '[' in text_until_cursor:
1235 lsplit = text
1266 lsplit = text
1236 else:
1267 else:
1237 try:
1268 try:
1238 # arg_split ~ shlex.split, but with unicode bugs fixed by us
1269 # arg_split ~ shlex.split, but with unicode bugs fixed by us
1239 lsplit = arg_split(text_until_cursor)[-1]
1270 lsplit = arg_split(text_until_cursor)[-1]
1240 except ValueError:
1271 except ValueError:
1241 # typically an unmatched ", or backslash without escaped char.
1272 # typically an unmatched ", or backslash without escaped char.
1242 if open_quotes:
1273 if open_quotes:
1243 lsplit = text_until_cursor.split(open_quotes)[-1]
1274 lsplit = text_until_cursor.split(open_quotes)[-1]
1244 else:
1275 else:
1245 return []
1276 return []
1246 except IndexError:
1277 except IndexError:
1247 # tab pressed on empty line
1278 # tab pressed on empty line
1248 lsplit = ""
1279 lsplit = ""
1249
1280
1250 if not open_quotes and lsplit != protect_filename(lsplit):
1281 if not open_quotes and lsplit != protect_filename(lsplit):
1251 # if protectables are found, do matching on the whole escaped name
1282 # if protectables are found, do matching on the whole escaped name
1252 has_protectables = True
1283 has_protectables = True
1253 text0,text = text,lsplit
1284 text0,text = text,lsplit
1254 else:
1285 else:
1255 has_protectables = False
1286 has_protectables = False
1256 text = os.path.expanduser(text)
1287 text = os.path.expanduser(text)
1257
1288
1258 if text == "":
1289 if text == "":
1259 return [text_prefix + protect_filename(f) for f in self.glob("*")]
1290 return [text_prefix + protect_filename(f) for f in self.glob("*")]
1260
1291
1261 # Compute the matches from the filesystem
1292 # Compute the matches from the filesystem
1262 if sys.platform == 'win32':
1293 if sys.platform == 'win32':
1263 m0 = self.clean_glob(text)
1294 m0 = self.clean_glob(text)
1264 else:
1295 else:
1265 m0 = self.clean_glob(text.replace('\\', ''))
1296 m0 = self.clean_glob(text.replace('\\', ''))
1266
1297
1267 if has_protectables:
1298 if has_protectables:
1268 # If we had protectables, we need to revert our changes to the
1299 # If we had protectables, we need to revert our changes to the
1269 # beginning of filename so that we don't double-write the part
1300 # beginning of filename so that we don't double-write the part
1270 # of the filename we have so far
1301 # of the filename we have so far
1271 len_lsplit = len(lsplit)
1302 len_lsplit = len(lsplit)
1272 matches = [text_prefix + text0 +
1303 matches = [text_prefix + text0 +
1273 protect_filename(f[len_lsplit:]) for f in m0]
1304 protect_filename(f[len_lsplit:]) for f in m0]
1274 else:
1305 else:
1275 if open_quotes:
1306 if open_quotes:
1276 # if we have a string with an open quote, we don't need to
1307 # if we have a string with an open quote, we don't need to
1277 # protect the names beyond the quote (and we _shouldn't_, as
1308 # protect the names beyond the quote (and we _shouldn't_, as
1278 # it would cause bugs when the filesystem call is made).
1309 # it would cause bugs when the filesystem call is made).
1279 matches = m0 if sys.platform == "win32" else\
1310 matches = m0 if sys.platform == "win32" else\
1280 [protect_filename(f, open_quotes) for f in m0]
1311 [protect_filename(f, open_quotes) for f in m0]
1281 else:
1312 else:
1282 matches = [text_prefix +
1313 matches = [text_prefix +
1283 protect_filename(f) for f in m0]
1314 protect_filename(f) for f in m0]
1284
1315
1285 # Mark directories in input list by appending '/' to their names.
1316 # Mark directories in input list by appending '/' to their names.
1286 return [x+'/' if os.path.isdir(x) else x for x in matches]
1317 return [x+'/' if os.path.isdir(x) else x for x in matches]
1287
1318
1288 def magic_matches(self, text:str):
1319 def magic_matches(self, text:str):
1289 """Match magics"""
1320 """Match magics"""
1290 # Get all shell magics now rather than statically, so magics loaded at
1321 # Get all shell magics now rather than statically, so magics loaded at
1291 # runtime show up too.
1322 # runtime show up too.
1292 lsm = self.shell.magics_manager.lsmagic()
1323 lsm = self.shell.magics_manager.lsmagic()
1293 line_magics = lsm['line']
1324 line_magics = lsm['line']
1294 cell_magics = lsm['cell']
1325 cell_magics = lsm['cell']
1295 pre = self.magic_escape
1326 pre = self.magic_escape
1296 pre2 = pre+pre
1327 pre2 = pre+pre
1297
1328
1298 explicit_magic = text.startswith(pre)
1329 explicit_magic = text.startswith(pre)
1299
1330
1300 # Completion logic:
1331 # Completion logic:
1301 # - user gives %%: only do cell magics
1332 # - user gives %%: only do cell magics
1302 # - user gives %: do both line and cell magics
1333 # - user gives %: do both line and cell magics
1303 # - no prefix: do both
1334 # - no prefix: do both
1304 # In other words, line magics are skipped if the user gives %% explicitly
1335 # In other words, line magics are skipped if the user gives %% explicitly
1305 #
1336 #
1306 # We also exclude magics that match any currently visible names:
1337 # We also exclude magics that match any currently visible names:
1307 # https://github.com/ipython/ipython/issues/4877, unless the user has
1338 # https://github.com/ipython/ipython/issues/4877, unless the user has
1308 # typed a %:
1339 # typed a %:
1309 # https://github.com/ipython/ipython/issues/10754
1340 # https://github.com/ipython/ipython/issues/10754
1310 bare_text = text.lstrip(pre)
1341 bare_text = text.lstrip(pre)
1311 global_matches = self.global_matches(bare_text)
1342 global_matches = self.global_matches(bare_text)
1312 if not explicit_magic:
1343 if not explicit_magic:
1313 def matches(magic):
1344 def matches(magic):
1314 """
1345 """
1315 Filter magics, in particular remove magics that match
1346 Filter magics, in particular remove magics that match
1316 a name present in global namespace.
1347 a name present in global namespace.
1317 """
1348 """
1318 return ( magic.startswith(bare_text) and
1349 return ( magic.startswith(bare_text) and
1319 magic not in global_matches )
1350 magic not in global_matches )
1320 else:
1351 else:
1321 def matches(magic):
1352 def matches(magic):
1322 return magic.startswith(bare_text)
1353 return magic.startswith(bare_text)
1323
1354
1324 comp = [ pre2+m for m in cell_magics if matches(m)]
1355 comp = [ pre2+m for m in cell_magics if matches(m)]
1325 if not text.startswith(pre2):
1356 if not text.startswith(pre2):
1326 comp += [ pre+m for m in line_magics if matches(m)]
1357 comp += [ pre+m for m in line_magics if matches(m)]
1327
1358
1328 return comp
1359 return comp
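
# A minimal sketch of the %-prefix rules above, assuming an interactive
# IPython session where get_ipython() is available and %time/%%time exist.
def _demo_magic_matches():
    ip = get_ipython()
    both = ip.Completer.magic_matches('%time')    # line and cell magics
    cells = ip.Completer.magic_matches('%%time')  # '%%' keeps cell magics only
    assert all(m.startswith('%%') for m in cells)
    assert any(not m.startswith('%%') for m in both)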
1329
1360
1330 def magic_config_matches(self, text:str) -> List[str]:
1361 def magic_config_matches(self, text:str) -> List[str]:
1331 """ Match class names and attributes for %config magic """
1362 """ Match class names and attributes for %config magic """
1332 texts = text.strip().split()
1363 texts = text.strip().split()
1333
1364
1334 if len(texts) > 0 and (texts[0] == 'config' or texts[0] == '%config'):
1365 if len(texts) > 0 and (texts[0] == 'config' or texts[0] == '%config'):
1335 # get all configuration classes
1366 # get all configuration classes
1336 classes = sorted(set([ c for c in self.shell.configurables
1367 classes = sorted(set([ c for c in self.shell.configurables
1337 if c.__class__.class_traits(config=True)
1368 if c.__class__.class_traits(config=True)
1338 ]), key=lambda x: x.__class__.__name__)
1369 ]), key=lambda x: x.__class__.__name__)
1339 classnames = [ c.__class__.__name__ for c in classes ]
1370 classnames = [ c.__class__.__name__ for c in classes ]
1340
1371
1341 # return all classnames if config or %config is given
1372 # return all classnames if config or %config is given
1342 if len(texts) == 1:
1373 if len(texts) == 1:
1343 return classnames
1374 return classnames
1344
1375
1345 # match classname
1376 # match classname
1346 classname_texts = texts[1].split('.')
1377 classname_texts = texts[1].split('.')
1347 classname = classname_texts[0]
1378 classname = classname_texts[0]
1348 classname_matches = [ c for c in classnames
1379 classname_matches = [ c for c in classnames
1349 if c.startswith(classname) ]
1380 if c.startswith(classname) ]
1350
1381
1351 # return matched classes or the matched class with attributes
1382 # return matched classes or the matched class with attributes
1352 if texts[1].find('.') < 0:
1383 if texts[1].find('.') < 0:
1353 return classname_matches
1384 return classname_matches
1354 elif len(classname_matches) == 1 and \
1385 elif len(classname_matches) == 1 and \
1355 classname_matches[0] == classname:
1386 classname_matches[0] == classname:
1356 cls = classes[classnames.index(classname)].__class__
1387 cls = classes[classnames.index(classname)].__class__
1357 help = cls.class_get_help()
1388 help = cls.class_get_help()
1358 # strip leading '--' from cl-args:
1389 # strip leading '--' from cl-args:
1359 help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
1390 help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
1360 return [ attr.split('=')[0]
1391 return [ attr.split('=')[0]
1361 for attr in help.strip().splitlines()
1392 for attr in help.strip().splitlines()
1362 if attr.startswith(texts[1]) ]
1393 if attr.startswith(texts[1]) ]
1363 return []
1394 return []
1364
1395
1365 def magic_color_matches(self, text:str) -> List[str] :
1396 def magic_color_matches(self, text:str) -> List[str] :
1366 """ Match color schemes for %colors magic"""
1397 """ Match color schemes for %colors magic"""
1367 texts = text.split()
1398 texts = text.split()
1368 if text.endswith(' '):
1399 if text.endswith(' '):
1369 # .split() strips off the trailing whitespace. Add '' back
1400 # .split() strips off the trailing whitespace. Add '' back
1370 # so that: '%colors ' -> ['%colors', '']
1401 # so that: '%colors ' -> ['%colors', '']
1371 texts.append('')
1402 texts.append('')
1372
1403
1373 if len(texts) == 2 and (texts[0] == 'colors' or texts[0] == '%colors'):
1404 if len(texts) == 2 and (texts[0] == 'colors' or texts[0] == '%colors'):
1374 prefix = texts[1]
1405 prefix = texts[1]
1375 return [ color for color in InspectColors.keys()
1406 return [ color for color in InspectColors.keys()
1376 if color.startswith(prefix) ]
1407 if color.startswith(prefix) ]
1377 return []
1408 return []
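
# A minimal sketch of why the method above re-appends an empty string:
# str.split() with no argument silently drops trailing whitespace.
def _demo_colors_split():
    assert '%colors '.split() == ['%colors']
    assert '%colors '.split() + [''] == ['%colors', '']
    assert '%colors lin'.split() == ['%colors', 'lin']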
1378
1409
1379 def _jedi_matches(self, cursor_column:int, cursor_line:int, text:str) -> Iterable[Any]:
1410 def _jedi_matches(self, cursor_column:int, cursor_line:int, text:str) -> Iterable[Any]:
1380 """
1411 """
1381
1412
1382 Return a list of :any:`jedi.api.Completion` objects from a ``text`` and
1413 Return a list of :any:`jedi.api.Completion` objects from a ``text`` and
1383 cursor position.
1414 cursor position.
1384
1415
1385 Parameters
1416 Parameters
1386 ----------
1417 ----------
1387 cursor_column : int
1418 cursor_column : int
1388 column position of the cursor in ``text``, 0-indexed.
1419 column position of the cursor in ``text``, 0-indexed.
1389 cursor_line : int
1420 cursor_line : int
1390 line position of the cursor in ``text``, 0-indexed
1421 line position of the cursor in ``text``, 0-indexed
1391 text : str
1422 text : str
1392 text to complete
1423 text to complete
1393
1424
1394 Debugging
1425 Debugging
1395 ---------
1426 ---------
1396
1427
1397 If ``IPCompleter.debug`` is ``True``, this may return a :any:`_FakeJediCompletion`
1428 If ``IPCompleter.debug`` is ``True``, this may return a :any:`_FakeJediCompletion`
1398 object containing a string with the Jedi debug information attached.
1429 object containing a string with the Jedi debug information attached.
1399 """
1430 """
1400 namespaces = [self.namespace]
1431 namespaces = [self.namespace]
1401 if self.global_namespace is not None:
1432 if self.global_namespace is not None:
1402 namespaces.append(self.global_namespace)
1433 namespaces.append(self.global_namespace)
1403
1434
1404 completion_filter = lambda x:x
1435 completion_filter = lambda x:x
1405 offset = cursor_to_position(text, cursor_line, cursor_column)
1436 offset = cursor_to_position(text, cursor_line, cursor_column)
1406 # filter output if we are completing for object members
1437 # filter output if we are completing for object members
1407 if offset:
1438 if offset:
1408 pre = text[offset-1]
1439 pre = text[offset-1]
1409 if pre == '.':
1440 if pre == '.':
1410 if self.omit__names == 2:
1441 if self.omit__names == 2:
1411 completion_filter = lambda c:not c.name.startswith('_')
1442 completion_filter = lambda c:not c.name.startswith('_')
1412 elif self.omit__names == 1:
1443 elif self.omit__names == 1:
1413 completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
1444 completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
1414 elif self.omit__names == 0:
1445 elif self.omit__names == 0:
1415 completion_filter = lambda x:x
1446 completion_filter = lambda x:x
1416 else:
1447 else:
1417 raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
1448 raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
1418
1449
1419 interpreter = jedi.Interpreter(text[:offset], namespaces)
1450 interpreter = jedi.Interpreter(text[:offset], namespaces)
1420 try_jedi = True
1451 try_jedi = True
1421
1452
1422 try:
1453 try:
1423 # find the first token in the current tree -- if it is a ' or " then we are in a string
1454 # find the first token in the current tree -- if it is a ' or " then we are in a string
1424 completing_string = False
1455 completing_string = False
1425 try:
1456 try:
1426 first_child = next(c for c in interpreter._get_module().tree_node.children if hasattr(c, 'value'))
1457 first_child = next(c for c in interpreter._get_module().tree_node.children if hasattr(c, 'value'))
1427 except StopIteration:
1458 except StopIteration:
1428 pass
1459 pass
1429 else:
1460 else:
1430 # note the value may be ', ", or it may also be ''' or """, or
1461 # note the value may be ', ", or it may also be ''' or """, or
1431 # in some cases, """what/you/typed..., but all of these are
1462 # in some cases, """what/you/typed..., but all of these are
1432 # strings.
1463 # strings.
1433 completing_string = len(first_child.value) > 0 and first_child.value[0] in {"'", '"'}
1464 completing_string = len(first_child.value) > 0 and first_child.value[0] in {"'", '"'}
1434
1465
1435 # if we are in a string jedi is likely not the right candidate for
1466 # if we are in a string jedi is likely not the right candidate for
1436 # now. Skip it.
1467 # now. Skip it.
1437 try_jedi = not completing_string
1468 try_jedi = not completing_string
1438 except Exception as e:
1469 except Exception as e:
1439 # many things can go wrong; we are using a private API, so just don't crash.
1470 # many things can go wrong; we are using a private API, so just don't crash.
1440 if self.debug:
1471 if self.debug:
1441 print("Error detecting if completing a non-finished string :", e, '|')
1472 print("Error detecting if completing a non-finished string :", e, '|')
1442
1473
1443 if not try_jedi:
1474 if not try_jedi:
1444 return []
1475 return []
1445 try:
1476 try:
1446 return filter(completion_filter, interpreter.complete(column=cursor_column, line=cursor_line + 1))
1477 return filter(completion_filter, interpreter.complete(column=cursor_column, line=cursor_line + 1))
1447 except Exception as e:
1478 except Exception as e:
1448 if self.debug:
1479 if self.debug:
1449 return [_FakeJediCompletion('Oops, Jedi has crashed, please report a bug with the following:\n"""\n%s\n"""' % (e))]
1480 return [_FakeJediCompletion('Oops, Jedi has crashed, please report a bug with the following:\n"""\n%s\n"""' % (e))]
1450 else:
1481 else:
1451 return []
1482 return []
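
# A minimal sketch of the public jedi API used above (Interpreter.complete
# with a 1-based line and 0-based column), assuming jedi is installed.
def _demo_jedi_interpreter():
    import jedi
    interp = jedi.Interpreter('d.ke', [{'d': {}}])
    names = [c.name for c in interp.complete(line=1, column=4)]
    assert 'keys' in names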
1452
1483
1453 def python_matches(self, text:str)->List[str]:
1484 def python_matches(self, text:str)->List[str]:
1454 """Match attributes or global python names"""
1485 """Match attributes or global python names"""
1455 if "." in text:
1486 if "." in text:
1456 try:
1487 try:
1457 matches = self.attr_matches(text)
1488 matches = self.attr_matches(text)
1458 if text.endswith('.') and self.omit__names:
1489 if text.endswith('.') and self.omit__names:
1459 if self.omit__names == 1:
1490 if self.omit__names == 1:
1460 # true if txt is _not_ a __ name, false otherwise:
1491 # true if txt is _not_ a __ name, false otherwise:
1461 no__name = (lambda txt:
1492 no__name = (lambda txt:
1462 re.match(r'.*\.__.*?__',txt) is None)
1493 re.match(r'.*\.__.*?__',txt) is None)
1463 else:
1494 else:
1464 # true if txt is _not_ a _ name, false otherwise:
1495 # true if txt is _not_ a _ name, false otherwise:
1465 no__name = (lambda txt:
1496 no__name = (lambda txt:
1466 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
1497 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
1467 matches = filter(no__name, matches)
1498 matches = filter(no__name, matches)
1468 except NameError:
1499 except NameError:
1469 # catches <undefined attributes>.<tab>
1500 # catches <undefined attributes>.<tab>
1470 matches = []
1501 matches = []
1471 else:
1502 else:
1472 matches = self.global_matches(text)
1503 matches = self.global_matches(text)
1473 return matches
1504 return matches
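
# A minimal sketch of the two filters built above: omit__names == 1 drops
# only dunder attributes, omit__names == 2 drops every _-prefixed name.
def _demo_omit_names_filters():
    import re
    keep_at_level_1 = lambda txt: re.match(r'.*\.__.*?__', txt) is None
    assert keep_at_level_1('obj._private')        # single underscore survives
    assert not keep_at_level_1('obj.__class__')   # dunder is dropped
    keep_at_level_2 = lambda txt: re.match(r'\._.*?', txt[txt.rindex('.'):]) is None
    assert not keep_at_level_2('obj._private')    # any leading _ is dropped
    assert keep_at_level_2('obj.value')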
1474
1505
1475 def _default_arguments_from_docstring(self, doc):
1506 def _default_arguments_from_docstring(self, doc):
1476 """Parse the first line of docstring for call signature.
1507 """Parse the first line of docstring for call signature.
1477
1508
1478 Docstring should be of the form 'min(iterable[, key=func])\n'.
1509 Docstring should be of the form 'min(iterable[, key=func])\n'.
1479 It can also parse cython docstring of the form
1510 It can also parse cython docstring of the form
1480 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
1511 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
1481 """
1512 """
1482 if doc is None:
1513 if doc is None:
1483 return []
1514 return []
1484
1515
1485 # only the first line of the docstring matters here
1516 # only the first line of the docstring matters here
1486 line = doc.lstrip().splitlines()[0]
1517 line = doc.lstrip().splitlines()[0]
1487
1518
1488 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1519 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1489 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
1520 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
1490 sig = self.docstring_sig_re.search(line)
1521 sig = self.docstring_sig_re.search(line)
1491 if sig is None:
1522 if sig is None:
1492 return []
1523 return []
1493 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
1524 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
1494 sig = sig.groups()[0].split(',')
1525 sig = sig.groups()[0].split(',')
1495 ret = []
1526 ret = []
1496 for s in sig:
1527 for s in sig:
1497 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1528 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1498 ret += self.docstring_kwd_re.findall(s)
1529 ret += self.docstring_kwd_re.findall(s)
1499 return ret
1530 return ret
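
# A minimal sketch of the two regular expressions compiled in __init__ and
# used above, applied by hand to the form quoted in the docstring.
def _demo_docstring_signature_parsing():
    import re
    sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
    kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
    line = 'min(iterable[, key=func])'
    inner = sig_re.search(line).groups()[0]   # 'iterable[, key=func]'
    names = [n for part in inner.split(',') for n in kwd_re.findall(part)]
    assert names == ['key']                   # only keyword arguments match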
1500
1531
1501 def _default_arguments(self, obj):
1532 def _default_arguments(self, obj):
1502 """Return the list of default arguments of obj if it is callable,
1533 """Return the list of default arguments of obj if it is callable,
1503 or empty list otherwise."""
1534 or empty list otherwise."""
1504 call_obj = obj
1535 call_obj = obj
1505 ret = []
1536 ret = []
1506 if inspect.isbuiltin(obj):
1537 if inspect.isbuiltin(obj):
1507 pass
1538 pass
1508 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
1539 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
1509 if inspect.isclass(obj):
1540 if inspect.isclass(obj):
1510 #for cython embedsignature=True the constructor docstring
1541 #for cython embedsignature=True the constructor docstring
1511 #belongs to the object itself not __init__
1542 #belongs to the object itself not __init__
1512 ret += self._default_arguments_from_docstring(
1543 ret += self._default_arguments_from_docstring(
1513 getattr(obj, '__doc__', ''))
1544 getattr(obj, '__doc__', ''))
1514 # for classes, check for __init__,__new__
1545 # for classes, check for __init__,__new__
1515 call_obj = (getattr(obj, '__init__', None) or
1546 call_obj = (getattr(obj, '__init__', None) or
1516 getattr(obj, '__new__', None))
1547 getattr(obj, '__new__', None))
1517 # for all others, check if they are __call__able
1548 # for all others, check if they are __call__able
1518 elif hasattr(obj, '__call__'):
1549 elif hasattr(obj, '__call__'):
1519 call_obj = obj.__call__
1550 call_obj = obj.__call__
1520 ret += self._default_arguments_from_docstring(
1551 ret += self._default_arguments_from_docstring(
1521 getattr(call_obj, '__doc__', ''))
1552 getattr(call_obj, '__doc__', ''))
1522
1553
1523 _keeps = (inspect.Parameter.KEYWORD_ONLY,
1554 _keeps = (inspect.Parameter.KEYWORD_ONLY,
1524 inspect.Parameter.POSITIONAL_OR_KEYWORD)
1555 inspect.Parameter.POSITIONAL_OR_KEYWORD)
1525
1556
1526 try:
1557 try:
1527 sig = inspect.signature(call_obj)
1558 sig = inspect.signature(call_obj)
1528 ret.extend(k for k, v in sig.parameters.items() if
1559 ret.extend(k for k, v in sig.parameters.items() if
1529 v.kind in _keeps)
1560 v.kind in _keeps)
1530 except ValueError:
1561 except ValueError:
1531 pass
1562 pass
1532
1563
1533 return list(set(ret))
1564 return list(set(ret))
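
# A minimal sketch of the signature-based branch above: only parameters that
# can be passed by keyword are offered later as 'name=' completions.
def _demo_signature_kwarg_names():
    import inspect
    def example(a, b=1, *args, c=2, **kwargs):
        pass
    keeps = (inspect.Parameter.KEYWORD_ONLY,
             inspect.Parameter.POSITIONAL_OR_KEYWORD)
    names = [k for k, v in inspect.signature(example).parameters.items()
             if v.kind in keeps]
    assert names == ['a', 'b', 'c']           # *args and **kwargs are skipped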
1534
1565
1535 def python_func_kw_matches(self, text):
1566 def python_func_kw_matches(self, text):
1536 """Match named parameters (kwargs) of the last open function"""
1567 """Match named parameters (kwargs) of the last open function"""
1537
1568
1538 if "." in text: # a parameter cannot be dotted
1569 if "." in text: # a parameter cannot be dotted
1539 return []
1570 return []
1540 try: regexp = self.__funcParamsRegex
1571 try: regexp = self.__funcParamsRegex
1541 except AttributeError:
1572 except AttributeError:
1542 regexp = self.__funcParamsRegex = re.compile(r'''
1573 regexp = self.__funcParamsRegex = re.compile(r'''
1543 '.*?(?<!\\)' | # single quoted strings or
1574 '.*?(?<!\\)' | # single quoted strings or
1544 ".*?(?<!\\)" | # double quoted strings or
1575 ".*?(?<!\\)" | # double quoted strings or
1545 \w+ | # identifier
1576 \w+ | # identifier
1546 \S # other characters
1577 \S # other characters
1547 ''', re.VERBOSE | re.DOTALL)
1578 ''', re.VERBOSE | re.DOTALL)
1548 # 1. find the nearest identifier that comes before an unclosed
1579 # 1. find the nearest identifier that comes before an unclosed
1549 # parenthesis before the cursor
1580 # parenthesis before the cursor
1550 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
1581 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
1551 tokens = regexp.findall(self.text_until_cursor)
1582 tokens = regexp.findall(self.text_until_cursor)
1552 iterTokens = reversed(tokens); openPar = 0
1583 iterTokens = reversed(tokens); openPar = 0
1553
1584
1554 for token in iterTokens:
1585 for token in iterTokens:
1555 if token == ')':
1586 if token == ')':
1556 openPar -= 1
1587 openPar -= 1
1557 elif token == '(':
1588 elif token == '(':
1558 openPar += 1
1589 openPar += 1
1559 if openPar > 0:
1590 if openPar > 0:
1560 # found the last unclosed parenthesis
1591 # found the last unclosed parenthesis
1561 break
1592 break
1562 else:
1593 else:
1563 return []
1594 return []
1564 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
1595 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
1565 ids = []
1596 ids = []
1566 isId = re.compile(r'\w+$').match
1597 isId = re.compile(r'\w+$').match
1567
1598
1568 while True:
1599 while True:
1569 try:
1600 try:
1570 ids.append(next(iterTokens))
1601 ids.append(next(iterTokens))
1571 if not isId(ids[-1]):
1602 if not isId(ids[-1]):
1572 ids.pop(); break
1603 ids.pop(); break
1573 if not next(iterTokens) == '.':
1604 if not next(iterTokens) == '.':
1574 break
1605 break
1575 except StopIteration:
1606 except StopIteration:
1576 break
1607 break
1577
1608
1578 # Find all named arguments that have already been assigned, so as to avoid
1609 # Find all named arguments that have already been assigned, so as to avoid
1579 # suggesting them again
1610 # suggesting them again
1580 usedNamedArgs = set()
1611 usedNamedArgs = set()
1581 par_level = -1
1612 par_level = -1
1582 for token, next_token in zip(tokens, tokens[1:]):
1613 for token, next_token in zip(tokens, tokens[1:]):
1583 if token == '(':
1614 if token == '(':
1584 par_level += 1
1615 par_level += 1
1585 elif token == ')':
1616 elif token == ')':
1586 par_level -= 1
1617 par_level -= 1
1587
1618
1588 if par_level != 0:
1619 if par_level != 0:
1589 continue
1620 continue
1590
1621
1591 if next_token != '=':
1622 if next_token != '=':
1592 continue
1623 continue
1593
1624
1594 usedNamedArgs.add(token)
1625 usedNamedArgs.add(token)
1595
1626
1596 argMatches = []
1627 argMatches = []
1597 try:
1628 try:
1598 callableObj = '.'.join(ids[::-1])
1629 callableObj = '.'.join(ids[::-1])
1599 namedArgs = self._default_arguments(eval(callableObj,
1630 namedArgs = self._default_arguments(eval(callableObj,
1600 self.namespace))
1631 self.namespace))
1601
1632
1602 # Remove used named arguments from the list, no need to show twice
1633 # Remove used named arguments from the list, no need to show twice
1603 for namedArg in set(namedArgs) - usedNamedArgs:
1634 for namedArg in set(namedArgs) - usedNamedArgs:
1604 if namedArg.startswith(text):
1635 if namedArg.startswith(text):
1605 argMatches.append("%s=" %namedArg)
1636 argMatches.append("%s=" %namedArg)
1606 except:
1637 except:
1607 pass
1638 pass
1608
1639
1609 return argMatches
1640 return argMatches
1610
1641
1611 @staticmethod
1642 @staticmethod
1612 def _get_keys(obj: Any) -> List[Any]:
1643 def _get_keys(obj: Any) -> List[Any]:
1613 # Objects can define their own completions by defining an
1644 # Objects can define their own completions by defining an
1614 # _ipython_key_completions_() method.
1645 # _ipython_key_completions_() method.
1615 method = get_real_method(obj, '_ipython_key_completions_')
1646 method = get_real_method(obj, '_ipython_key_completions_')
1616 if method is not None:
1647 if method is not None:
1617 return method()
1648 return method()
1618
1649
1619 # Special case some common in-memory dict-like types
1650 # Special case some common in-memory dict-like types
1620 if isinstance(obj, dict) or\
1651 if isinstance(obj, dict) or\
1621 _safe_isinstance(obj, 'pandas', 'DataFrame'):
1652 _safe_isinstance(obj, 'pandas', 'DataFrame'):
1622 try:
1653 try:
1623 return list(obj.keys())
1654 return list(obj.keys())
1624 except Exception:
1655 except Exception:
1625 return []
1656 return []
1626 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
1657 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
1627 _safe_isinstance(obj, 'numpy', 'void'):
1658 _safe_isinstance(obj, 'numpy', 'void'):
1628 return obj.dtype.names or []
1659 return obj.dtype.names or []
1629 return []
1660 return []
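
# A minimal sketch of the _ipython_key_completions_ protocol consulted above
# (the Store class is only for the example).
def _demo_get_keys():
    class Store:
        def _ipython_key_completions_(self):
            return ['alpha', 'beta']
    assert IPCompleter._get_keys(Store()) == ['alpha', 'beta']
    assert IPCompleter._get_keys({'x': 1}) == ['x']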
1630
1661
1631 def dict_key_matches(self, text:str) -> List[str]:
1662 def dict_key_matches(self, text:str) -> List[str]:
1632 "Match string keys in a dictionary, after e.g. 'foo[' "
1663 "Match string keys in a dictionary, after e.g. 'foo[' "
1633
1664
1634
1665
1635 if self.__dict_key_regexps is not None:
1666 if self.__dict_key_regexps is not None:
1636 regexps = self.__dict_key_regexps
1667 regexps = self.__dict_key_regexps
1637 else:
1668 else:
1638 dict_key_re_fmt = r'''(?x)
1669 dict_key_re_fmt = r'''(?x)
1639 ( # match dict-referring expression wrt greedy setting
1670 ( # match dict-referring expression wrt greedy setting
1640 %s
1671 %s
1641 )
1672 )
1642 \[ # open bracket
1673 \[ # open bracket
1643 \s* # and optional whitespace
1674 \s* # and optional whitespace
1675 # Capture any number of str-like objects (e.g. "a", "b", 'c')
1676 ((?:[uUbB]? # string prefix (r not handled)
1677 (?:
1678 '(?:[^']|(?<!\\)\\')*'
1679 |
1680 "(?:[^"]|(?<!\\)\\")*"
1681 )
1682 \s*,\s*
1683 )*)
1644 ([uUbB]? # string prefix (r not handled)
1684 ([uUbB]? # string prefix (r not handled)
1645 (?: # unclosed string
1685 (?: # unclosed string
1646 '(?:[^']|(?<!\\)\\')*
1686 '(?:[^']|(?<!\\)\\')*
1647 |
1687 |
1648 "(?:[^"]|(?<!\\)\\")*
1688 "(?:[^"]|(?<!\\)\\")*
1649 )
1689 )
1650 )?
1690 )?
1651 $
1691 $
1652 '''
1692 '''
1653 regexps = self.__dict_key_regexps = {
1693 regexps = self.__dict_key_regexps = {
1654 False: re.compile(dict_key_re_fmt % r'''
1694 False: re.compile(dict_key_re_fmt % r'''
1655 # identifiers separated by .
1695 # identifiers separated by .
1656 (?!\d)\w+
1696 (?!\d)\w+
1657 (?:\.(?!\d)\w+)*
1697 (?:\.(?!\d)\w+)*
1658 '''),
1698 '''),
1659 True: re.compile(dict_key_re_fmt % '''
1699 True: re.compile(dict_key_re_fmt % '''
1660 .+
1700 .+
1661 ''')
1701 ''')
1662 }
1702 }
1663
1703
1664 match = regexps[self.greedy].search(self.text_until_cursor)
1704 match = regexps[self.greedy].search(self.text_until_cursor)
1705
1665 if match is None:
1706 if match is None:
1666 return []
1707 return []
1667
1708
1668 expr, prefix = match.groups()
1709 expr, prefix0, prefix = match.groups()
1669 try:
1710 try:
1670 obj = eval(expr, self.namespace)
1711 obj = eval(expr, self.namespace)
1671 except Exception:
1712 except Exception:
1672 try:
1713 try:
1673 obj = eval(expr, self.global_namespace)
1714 obj = eval(expr, self.global_namespace)
1674 except Exception:
1715 except Exception:
1675 return []
1716 return []
1676
1717
1677 keys = self._get_keys(obj)
1718 keys = self._get_keys(obj)
1678 if not keys:
1719 if not keys:
1679 return keys
1720 return keys
1680 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
1721
1722 extra_prefix = eval(prefix0) if prefix0 != '' else None
1723
1724 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims, extra_prefix=extra_prefix)
1681 if not matches:
1725 if not matches:
1682 return matches
1726 return matches
1683
1727
1684 # get the cursor position of
1728 # get the cursor position of
1685 # - the text being completed
1729 # - the text being completed
1686 # - the start of the key text
1730 # - the start of the key text
1687 # - the start of the completion
1731 # - the start of the completion
1688 text_start = len(self.text_until_cursor) - len(text)
1732 text_start = len(self.text_until_cursor) - len(text)
1689 if prefix:
1733 if prefix:
1690 key_start = match.start(2)
1734 key_start = match.start(3)
1691 completion_start = key_start + token_offset
1735 completion_start = key_start + token_offset
1692 else:
1736 else:
1693 key_start = completion_start = match.end()
1737 key_start = completion_start = match.end()
1694
1738
1695 # grab the leading prefix, to make sure all completions start with `text`
1739 # grab the leading prefix, to make sure all completions start with `text`
1696 if text_start > key_start:
1740 if text_start > key_start:
1697 leading = ''
1741 leading = ''
1698 else:
1742 else:
1699 leading = text[text_start:completion_start]
1743 leading = text[text_start:completion_start]
1700
1744
1701 # the index of the `[` character
1745 # the index of the `[` character
1702 bracket_idx = match.end(1)
1746 bracket_idx = match.end(1)
1703
1747
1704 # append closing quote and bracket as appropriate
1748 # append closing quote and bracket as appropriate
1705 # this is *not* appropriate if the opening quote or bracket is outside
1749 # this is *not* appropriate if the opening quote or bracket is outside
1706 # the text given to this method
1750 # the text given to this method
1707 suf = ''
1751 suf = ''
1708 continuation = self.line_buffer[len(self.text_until_cursor):]
1752 continuation = self.line_buffer[len(self.text_until_cursor):]
1709 if key_start > text_start and closing_quote:
1753 if key_start > text_start and closing_quote:
1710 # quotes were opened inside text, maybe close them
1754 # quotes were opened inside text, maybe close them
1711 if continuation.startswith(closing_quote):
1755 if continuation.startswith(closing_quote):
1712 continuation = continuation[len(closing_quote):]
1756 continuation = continuation[len(closing_quote):]
1713 else:
1757 else:
1714 suf += closing_quote
1758 suf += closing_quote
1715 if bracket_idx > text_start:
1759 if bracket_idx > text_start:
1716 # brackets were opened inside text, maybe close them
1760 # brackets were opened inside text, maybe close them
1717 if not continuation.startswith(']'):
1761 if not continuation.startswith(']'):
1718 suf += ']'
1762 suf += ']'
1719
1763
1720 return [leading + k + suf for k in matches]
1764 return [leading + k + suf for k in matches]
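
# A minimal, simplified regex sketch of the new middle capture group above
# (not the exact pattern built by dict_key_matches): it collects the string
# keys of a tuple index that are already closed, so that something like
# d['a', 'b<tab> can be completed against tuple keys.
def _demo_tuple_key_regex():
    import re
    demo_re = re.compile(r"""(?x)
        (\w+)                                       # expression being indexed
        \[\s*
        ((?:[uUbB]?(?:'[^']*'|"[^"]*")\s*,\s*)*)    # closed keys typed so far
        ([uUbB]?(?:'[^']*|"[^"]*)?)                 # unclosed key being typed
        $""")
    m = demo_re.search("d['a', 'b")
    assert m.groups() == ("d", "'a', ", "'b")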
1721
1765
1722 @staticmethod
1766 @staticmethod
1723 def unicode_name_matches(text:str) -> Tuple[str, List[str]] :
1767 def unicode_name_matches(text:str) -> Tuple[str, List[str]] :
1724 """Match Latex-like syntax for unicode characters base
1768 """Match Latex-like syntax for unicode characters base
1725 on the name of the character.
1769 on the name of the character.
1726
1770
1727 This does ``\\GREEK SMALL LETTER ETA`` -> ``Ξ·``
1771 This does ``\\GREEK SMALL LETTER ETA`` -> ``Ξ·``
1728
1772
1729 Works only on valid Python 3 identifiers, or on combining characters that
1773 Works only on valid Python 3 identifiers, or on combining characters that
1730 will combine to form a valid identifier.
1774 will combine to form a valid identifier.
1731 """
1775 """
1732 slashpos = text.rfind('\\')
1776 slashpos = text.rfind('\\')
1733 if slashpos > -1:
1777 if slashpos > -1:
1734 s = text[slashpos+1:]
1778 s = text[slashpos+1:]
1735 try :
1779 try :
1736 unic = unicodedata.lookup(s)
1780 unic = unicodedata.lookup(s)
1737 # allow combining chars
1781 # allow combining chars
1738 if ('a'+unic).isidentifier():
1782 if ('a'+unic).isidentifier():
1739 return '\\'+s,[unic]
1783 return '\\'+s,[unic]
1740 except KeyError:
1784 except KeyError:
1741 pass
1785 pass
1742 return '', []
1786 return '', []
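
# A minimal sketch of the forward unicode-name lookup described above,
# using only the standard unicodedata module.
def _demo_unicode_name_matches():
    import unicodedata
    assert unicodedata.lookup('GREEK SMALL LETTER ETA') == 'Ξ·'
    result = IPCompleter.unicode_name_matches('\\GREEK SMALL LETTER ETA')
    assert result == ('\\GREEK SMALL LETTER ETA', ['Ξ·'])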
1743
1787
1744
1788
1745 def latex_matches(self, text:str) -> Tuple[str, Sequence[str]]:
1789 def latex_matches(self, text:str) -> Tuple[str, Sequence[str]]:
1746 """Match Latex syntax for unicode characters.
1790 """Match Latex syntax for unicode characters.
1747
1791
1748 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``Ξ±``
1792 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``Ξ±``
1749 """
1793 """
1750 slashpos = text.rfind('\\')
1794 slashpos = text.rfind('\\')
1751 if slashpos > -1:
1795 if slashpos > -1:
1752 s = text[slashpos:]
1796 s = text[slashpos:]
1753 if s in latex_symbols:
1797 if s in latex_symbols:
1754 # Try to complete a full latex symbol to unicode
1798 # Try to complete a full latex symbol to unicode
1755 # \\alpha -> Ξ±
1799 # \\alpha -> Ξ±
1756 return s, [latex_symbols[s]]
1800 return s, [latex_symbols[s]]
1757 else:
1801 else:
1758 # If a user has partially typed a latex symbol, give them
1802 # If a user has partially typed a latex symbol, give them
1759 # a full list of options \al -> [\aleph, \alpha]
1803 # a full list of options \al -> [\aleph, \alpha]
1760 matches = [k for k in latex_symbols if k.startswith(s)]
1804 matches = [k for k in latex_symbols if k.startswith(s)]
1761 if matches:
1805 if matches:
1762 return s, matches
1806 return s, matches
1763 return '', ()
1807 return '', ()
1764
1808
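For illustration, the behaviour can be exercised the same way the test-suite further down does (a sketch; assumes a live IPython session):

.. code:: python

    ip = get_ipython()

    # A full latex name is translated straight to its unicode character.
    assert ip.Completer.latex_matches("\\alpha") == ("\\alpha", ["Ξ±"])

    # A partial name returns every latex symbol sharing the prefix.
    text, matches = ip.Completer.latex_matches("\\al")
    assert text == "\\al" and "\\alpha" in matches and "\\aleph" in matches

    # No match at all: both fields are empty.
    assert ip.Completer.latex_matches("\\really_i_should_match_nothing") == ("", ())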
1765 def dispatch_custom_completer(self, text):
1809 def dispatch_custom_completer(self, text):
1766 if not self.custom_completers:
1810 if not self.custom_completers:
1767 return
1811 return
1768
1812
1769 line = self.line_buffer
1813 line = self.line_buffer
1770 if not line.strip():
1814 if not line.strip():
1771 return None
1815 return None
1772
1816
1773 # Create a little structure to pass all the relevant information about
1817 # Create a little structure to pass all the relevant information about
1774 # the current completion to any custom completer.
1818 # the current completion to any custom completer.
1775 event = SimpleNamespace()
1819 event = SimpleNamespace()
1776 event.line = line
1820 event.line = line
1777 event.symbol = text
1821 event.symbol = text
1778 cmd = line.split(None,1)[0]
1822 cmd = line.split(None,1)[0]
1779 event.command = cmd
1823 event.command = cmd
1780 event.text_until_cursor = self.text_until_cursor
1824 event.text_until_cursor = self.text_until_cursor
1781
1825
1782 # for foo etc, try also to find completer for %foo
1826 # for foo etc, try also to find completer for %foo
1783 if not cmd.startswith(self.magic_escape):
1827 if not cmd.startswith(self.magic_escape):
1784 try_magic = self.custom_completers.s_matches(
1828 try_magic = self.custom_completers.s_matches(
1785 self.magic_escape + cmd)
1829 self.magic_escape + cmd)
1786 else:
1830 else:
1787 try_magic = []
1831 try_magic = []
1788
1832
1789 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1833 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1790 try_magic,
1834 try_magic,
1791 self.custom_completers.flat_matches(self.text_until_cursor)):
1835 self.custom_completers.flat_matches(self.text_until_cursor)):
1792 try:
1836 try:
1793 res = c(event)
1837 res = c(event)
1794 if res:
1838 if res:
1795 # first, try case sensitive match
1839 # first, try case sensitive match
1796 withcase = [r for r in res if r.startswith(text)]
1840 withcase = [r for r in res if r.startswith(text)]
1797 if withcase:
1841 if withcase:
1798 return withcase
1842 return withcase
1799 # if none, then case insensitive ones are ok too
1843 # if none, then case insensitive ones are ok too
1800 text_low = text.lower()
1844 text_low = text.lower()
1801 return [r for r in res if r.lower().startswith(text_low)]
1845 return [r for r in res if r.lower().startswith(text_low)]
1802 except TryNext:
1846 except TryNext:
1803 pass
1847 pass
1804 except KeyboardInterrupt:
1848 except KeyboardInterrupt:
1805 """
1849 """
1806 If a custom completer takes too long,
1850 If a custom completer takes too long,
1807 let the keyboard interrupt abort and return nothing.
1851 let the keyboard interrupt abort and return nothing.
1808 """
1852 """
1809 break
1853 break
1810
1854
1811 return None
1855 return None
1812
1856
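The event namespace built above exposes ``line``, ``symbol``, ``command`` and ``text_until_cursor``; a custom completer receives it and returns a list of strings. A minimal registration sketch, following the ``add_re`` pattern used by the tests below (the names are illustrative):

.. code:: python

    ip = get_ipython()

    def complete_example(event):
        # event.symbol is the fragment being completed, event.line the full line.
        return ["example2", "example1"]

    # Register the completer for any command matching the regular expression 'ex*'.
    ip.Completer.custom_completers.add_re("ex*", complete_example)

    _, matches = ip.complete("ex")
    assert "example1" in matches and "example2" in matches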
1813 def completions(self, text: str, offset: int)->Iterator[Completion]:
1857 def completions(self, text: str, offset: int)->Iterator[Completion]:
1814 """
1858 """
1815 Returns an iterator over the possible completions
1859 Returns an iterator over the possible completions
1816
1860
1817 .. warning:: Unstable
1861 .. warning:: Unstable
1818
1862
1819 This function is unstable, API may change without warning.
1863 This function is unstable, API may change without warning.
1820 It will also raise unless used in the proper context manager.
1864 It will also raise unless used in the proper context manager.
1821
1865
1822 Parameters
1866 Parameters
1823 ----------
1867 ----------
1824
1868
1825 text:str
1869 text:str
1826 Full text of the current input, multi-line string.
1870 Full text of the current input, multi-line string.
1827 offset:int
1871 offset:int
1828 Integer representing the position of the cursor in ``text``. Offset
1872 Integer representing the position of the cursor in ``text``. Offset
1829 is 0-based.
1873 is 0-based.
1830
1874
1831 Yields
1875 Yields
1832 ------
1876 ------
1833 :any:`Completion` object
1877 :any:`Completion` object
1834
1878
1835
1879
1836 The cursor in a text can either be seen as being "in between"
1880 The cursor in a text can either be seen as being "in between"
1837 characters or "on" a character, depending on the interface visible to
1881 characters or "on" a character, depending on the interface visible to
1838 the user. For consistency, the cursor being "in between" characters X
1882 the user. For consistency, the cursor being "in between" characters X
1839 and Y is equivalent to the cursor being "on" character Y, that is to say
1883 and Y is equivalent to the cursor being "on" character Y, that is to say
1840 the character the cursor is on is considered as being after the cursor.
1884 the character the cursor is on is considered as being after the cursor.
1841
1885
1842 Combining characters may span more than one position in the
1886 Combining characters may span more than one position in the
1843 text.
1887 text.
1844
1888
1845
1889
1846 .. note::
1890 .. note::
1847
1891
1848 If ``IPCompleter.debug`` is :any:`True`, this will yield a ``--jedi/ipython--``
1892 If ``IPCompleter.debug`` is :any:`True`, this will yield a ``--jedi/ipython--``
1849 fake Completion token to distinguish completions returned by Jedi
1893 fake Completion token to distinguish completions returned by Jedi
1850 from the usual IPython completions.
1894 from the usual IPython completions.
1851
1895
1852 .. note::
1896 .. note::
1853
1897
1854 Completions are not completely deduplicated yet. If identical
1898 Completions are not completely deduplicated yet. If identical
1855 completions come from different sources, this function does not
1899 completions come from different sources, this function does not
1856 ensure that each completion object is present only once.
1900 ensure that each completion object is present only once.
1857 """
1901 """
1858 warnings.warn("_complete is a provisional API (as of IPython 6.0). "
1902 warnings.warn("_complete is a provisional API (as of IPython 6.0). "
1859 "It may change without warnings. "
1903 "It may change without warnings. "
1860 "Use in corresponding context manager.",
1904 "Use in corresponding context manager.",
1861 category=ProvisionalCompleterWarning, stacklevel=2)
1905 category=ProvisionalCompleterWarning, stacklevel=2)
1862
1906
1863 seen = set()
1907 seen = set()
1864 profiler:Optional[cProfile.Profile]
1908 profiler:Optional[cProfile.Profile]
1865 try:
1909 try:
1866 if self.profile_completions:
1910 if self.profile_completions:
1867 import cProfile
1911 import cProfile
1868 profiler = cProfile.Profile()
1912 profiler = cProfile.Profile()
1869 profiler.enable()
1913 profiler.enable()
1870 else:
1914 else:
1871 profiler = None
1915 profiler = None
1872
1916
1873 for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
1917 for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
1874 if c and (c in seen):
1918 if c and (c in seen):
1875 continue
1919 continue
1876 yield c
1920 yield c
1877 seen.add(c)
1921 seen.add(c)
1878 except KeyboardInterrupt:
1922 except KeyboardInterrupt:
1879 """if completions take too long and users send keyboard interrupt,
1923 """if completions take too long and users send keyboard interrupt,
1880 do not crash and return ASAP. """
1924 do not crash and return ASAP. """
1881 pass
1925 pass
1882 finally:
1926 finally:
1883 if profiler is not None:
1927 if profiler is not None:
1884 profiler.disable()
1928 profiler.disable()
1885 ensure_dir_exists(self.profiler_output_dir)
1929 ensure_dir_exists(self.profiler_output_dir)
1886 output_path = os.path.join(self.profiler_output_dir, str(uuid.uuid4()))
1930 output_path = os.path.join(self.profiler_output_dir, str(uuid.uuid4()))
1887 print("Writing profiler output to", output_path)
1931 print("Writing profiler output to", output_path)
1888 profiler.dump_stats(output_path)
1932 profiler.dump_stats(output_path)
1889
1933
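Because the API is provisional, callers are expected to opt in explicitly; the test-suite below does this with the ``provisionalcompleter`` context manager. A sketch of typical use (assumes a live IPython session):

.. code:: python

    from IPython.core.completer import provisionalcompleter

    ip = get_ipython()
    code = "print(abs"

    with provisionalcompleter():
        for completion in ip.Completer.completions(code, len(code)):
            # Each Completion carries the replacement range, the proposed text
            # and, when it could be computed in time, a type.
            print(completion.start, completion.end, completion.text, completion.type)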
1890 def _completions(self, full_text: str, offset: int, *, _timeout) -> Iterator[Completion]:
1934 def _completions(self, full_text: str, offset: int, *, _timeout) -> Iterator[Completion]:
1891 """
1935 """
1892 Core completion routine. Same signature as :any:`completions`, with the
1936 Core completion routine. Same signature as :any:`completions`, with the
1893 extra `_timeout` parameter (in seconds).
1937 extra `_timeout` parameter (in seconds).
1894
1938
1895
1939
1896 Computing jedi's completion ``.type`` can be quite expensive (it is a
1940 Computing jedi's completion ``.type`` can be quite expensive (it is a
1897 lazy property) and can require some warm-up, more warm-up than just
1941 lazy property) and can require some warm-up, more warm-up than just
1898 computing the ``name`` of a completion. The warm-up can be:
1942 computing the ``name`` of a completion. The warm-up can be:
1899
1943
1900 - Long warm-up the first time a module is encountered after
1944 - Long warm-up the first time a module is encountered after
1901 install/update: actually build parse/inference tree.
1945 install/update: actually build parse/inference tree.
1902
1946
1903 - first time the module is encountered in a session: load tree from
1947 - first time the module is encountered in a session: load tree from
1904 disk.
1948 disk.
1905
1949
1906 We don't want to block completions for tens of seconds, so we give the
1950 We don't want to block completions for tens of seconds, so we give the
1907 completer a "budget" of ``_timeout`` seconds per invocation to compute
1951 completer a "budget" of ``_timeout`` seconds per invocation to compute
1908 completion types; the completions whose type has not yet been computed
1952 completion types; the completions whose type has not yet been computed
1909 will be marked as "unknown" and will have a chance to be computed on the
1953 will be marked as "unknown" and will have a chance to be computed on the
1910 next round as things get cached.
1954 next round as things get cached.
1911
1955
1912 Keep in mind that Jedi is not the only thing processing the completions,
1956 Keep in mind that Jedi is not the only thing processing the completions,
1913 so keep the timeout short-ish: if we take more than 0.3 seconds we still
1957 so keep the timeout short-ish: if we take more than 0.3 seconds we still
1914 have lots of processing to do.
1958 have lots of processing to do.
1915
1959
1916 """
1960 """
1917 deadline = time.monotonic() + _timeout
1961 deadline = time.monotonic() + _timeout
1918
1962
1919
1963
1920 before = full_text[:offset]
1964 before = full_text[:offset]
1921 cursor_line, cursor_column = position_to_cursor(full_text, offset)
1965 cursor_line, cursor_column = position_to_cursor(full_text, offset)
1922
1966
1923 matched_text, matches, matches_origin, jedi_matches = self._complete(
1967 matched_text, matches, matches_origin, jedi_matches = self._complete(
1924 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column)
1968 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column)
1925
1969
1926 iter_jm = iter(jedi_matches)
1970 iter_jm = iter(jedi_matches)
1927 if _timeout:
1971 if _timeout:
1928 for jm in iter_jm:
1972 for jm in iter_jm:
1929 try:
1973 try:
1930 type_ = jm.type
1974 type_ = jm.type
1931 except Exception:
1975 except Exception:
1932 if self.debug:
1976 if self.debug:
1933 print("Error in Jedi getting type of ", jm)
1977 print("Error in Jedi getting type of ", jm)
1934 type_ = None
1978 type_ = None
1935 delta = len(jm.name_with_symbols) - len(jm.complete)
1979 delta = len(jm.name_with_symbols) - len(jm.complete)
1936 if type_ == 'function':
1980 if type_ == 'function':
1937 signature = _make_signature(jm)
1981 signature = _make_signature(jm)
1938 else:
1982 else:
1939 signature = ''
1983 signature = ''
1940 yield Completion(start=offset - delta,
1984 yield Completion(start=offset - delta,
1941 end=offset,
1985 end=offset,
1942 text=jm.name_with_symbols,
1986 text=jm.name_with_symbols,
1943 type=type_,
1987 type=type_,
1944 signature=signature,
1988 signature=signature,
1945 _origin='jedi')
1989 _origin='jedi')
1946
1990
1947 if time.monotonic() > deadline:
1991 if time.monotonic() > deadline:
1948 break
1992 break
1949
1993
1950 for jm in iter_jm:
1994 for jm in iter_jm:
1951 delta = len(jm.name_with_symbols) - len(jm.complete)
1995 delta = len(jm.name_with_symbols) - len(jm.complete)
1952 yield Completion(start=offset - delta,
1996 yield Completion(start=offset - delta,
1953 end=offset,
1997 end=offset,
1954 text=jm.name_with_symbols,
1998 text=jm.name_with_symbols,
1955 type='<unknown>', # don't compute type for speed
1999 type='<unknown>', # don't compute type for speed
1956 _origin='jedi',
2000 _origin='jedi',
1957 signature='')
2001 signature='')
1958
2002
1959
2003
1960 start_offset = before.rfind(matched_text)
2004 start_offset = before.rfind(matched_text)
1961
2005
1962 # TODO:
2006 # TODO:
1963 # Suppress this, right now just for debug.
2007 # Suppress this, right now just for debug.
1964 if jedi_matches and matches and self.debug:
2008 if jedi_matches and matches and self.debug:
1965 yield Completion(start=start_offset, end=offset, text='--jedi/ipython--',
2009 yield Completion(start=start_offset, end=offset, text='--jedi/ipython--',
1966 _origin='debug', type='none', signature='')
2010 _origin='debug', type='none', signature='')
1967
2011
1968 # I'm unsure if this is always true, so let's assert and see if it
2012 # I'm unsure if this is always true, so let's assert and see if it
1969 # crashes
2013 # crashes
1970 assert before.endswith(matched_text)
2014 assert before.endswith(matched_text)
1971 for m, t in zip(matches, matches_origin):
2015 for m, t in zip(matches, matches_origin):
1972 yield Completion(start=start_offset, end=offset, text=m, _origin=t, signature='', type='<unknown>')
2016 yield Completion(start=start_offset, end=offset, text=m, _origin=t, signature='', type='<unknown>')
1973
2017
1974
2018
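The budget and profiling behaviour above is driven by attributes of the completer that this method references (``jedi_compute_type_timeout``, ``debug``, ``profile_completions``, ``profiler_output_dir``). A sketch of tuning them at runtime; the values below are arbitrary:

.. code:: python

    ip = get_ipython()
    comp = ip.Completer

    # Larger per-invocation budget (milliseconds) for computing Jedi types.
    comp.jedi_compute_type_timeout = 800

    # Emit the --jedi/ipython-- marker separating Jedi and IPython results.
    comp.debug = True

    # Dump a cProfile trace of each completion call into this directory.
    comp.profile_completions = True
    comp.profiler_output_dir = "/tmp/ipython-completion-profiles"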
1975 def complete(self, text=None, line_buffer=None, cursor_pos=None) -> Tuple[str, Sequence[str]]:
2019 def complete(self, text=None, line_buffer=None, cursor_pos=None) -> Tuple[str, Sequence[str]]:
1976 """Find completions for the given text and line context.
2020 """Find completions for the given text and line context.
1977
2021
1978 Note that both the text and the line_buffer are optional, but at least
2022 Note that both the text and the line_buffer are optional, but at least
1979 one of them must be given.
2023 one of them must be given.
1980
2024
1981 Parameters
2025 Parameters
1982 ----------
2026 ----------
1983 text : string, optional
2027 text : string, optional
1984 Text to perform the completion on. If not given, the line buffer
2028 Text to perform the completion on. If not given, the line buffer
1985 is split using the instance's CompletionSplitter object.
2029 is split using the instance's CompletionSplitter object.
1986
2030
1987 line_buffer : string, optional
2031 line_buffer : string, optional
1988 If not given, the completer attempts to obtain the current line
2032 If not given, the completer attempts to obtain the current line
1989 buffer via readline. This keyword allows clients which are
2033 buffer via readline. This keyword allows clients which are
1990 requesting text completions in non-readline contexts to inform
2034 requesting text completions in non-readline contexts to inform
1991 the completer of the entire text.
2035 the completer of the entire text.
1992
2036
1993 cursor_pos : int, optional
2037 cursor_pos : int, optional
1994 Index of the cursor in the full line buffer. Should be provided by
2038 Index of the cursor in the full line buffer. Should be provided by
1995 remote frontends where kernel has no access to frontend state.
2039 remote frontends where kernel has no access to frontend state.
1996
2040
1997 Returns
2041 Returns
1998 -------
2042 -------
1999 Tuple of two items:
2043 Tuple of two items:
2000 text : str
2044 text : str
2001 Text that was actually used in the completion.
2045 Text that was actually used in the completion.
2002 matches : list
2046 matches : list
2003 A list of completion matches.
2047 A list of completion matches.
2004
2048
2005
2049
2006 .. note::
2050 .. note::
2007
2051
2008 This API is likely to be deprecated and replaced by
2052 This API is likely to be deprecated and replaced by
2009 :any:`IPCompleter.completions` in the future.
2053 :any:`IPCompleter.completions` in the future.
2010
2054
2011
2055
2012 """
2056 """
2013 warnings.warn('`Completer.complete` is pending deprecation since '
2057 warnings.warn('`Completer.complete` is pending deprecation since '
2014 'IPython 6.0 and will be replaced by `Completer.completions`.',
2058 'IPython 6.0 and will be replaced by `Completer.completions`.',
2015 PendingDeprecationWarning)
2059 PendingDeprecationWarning)
2016 # potential todo, FOLD the 3rd throw-away argument of _complete
2060 # potential todo, FOLD the 3rd throw-away argument of _complete
2017 # into the first two.
2061 # into the first two.
2018 return self._complete(line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0)[:2]
2062 return self._complete(line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0)[:2]
2019
2063
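A sketch of this (pending-deprecation) API, matching how the tests below call it (assumes a live IPython session; ``print`` is expected among the proposals in a default namespace):

.. code:: python

    ip = get_ipython()

    # InteractiveShell.complete wraps this method; the tests below call it the same way.
    text, matches = ip.complete("pri")
    assert text == "pri" and "print" in matches

    # Passing the full line and the cursor position lets a frontend complete mid-line.
    text, matches = ip.complete("sys.pa", "print(sys.pa", 12)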
2020 def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
2064 def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
2021 full_text=None) -> _CompleteResult:
2065 full_text=None) -> _CompleteResult:
2022 """
2066 """
2023
2067
2024 Like complete but can also return raw jedi completions as well as the
2068 Like complete but can also return raw jedi completions as well as the
2025 origin of the completion text. This could (and should) be made much
2069 origin of the completion text. This could (and should) be made much
2026 cleaner but that will be simpler once we drop the old (and stateful)
2070 cleaner but that will be simpler once we drop the old (and stateful)
2027 :any:`complete` API.
2071 :any:`complete` API.
2028
2072
2029
2073
2030 With the current provisional API, cursor_pos acts (depending on the
2074 With the current provisional API, cursor_pos acts (depending on the
2031 caller) either as the offset in ``text`` or ``line_buffer``, or as the
2075 caller) either as the offset in ``text`` or ``line_buffer``, or as the
2032 ``column`` when passing multiline strings. This could/should be renamed
2076 ``column`` when passing multiline strings. This could/should be renamed
2033 but would add extra noise.
2077 but would add extra noise.
2034
2078
2035 Return
2079 Return
2036 ======
2080 ======
2037
2081
2038 A tuple of N elements which are (likely):
2082 A tuple of N elements which are (likely):
2039
2083
2040 matched_text: ? the text that the completion matched
2084 matched_text: ? the text that the completion matched
2041 matches: list of completions ?
2085 matches: list of completions ?
2042 matches_origin: ? list of the same length as matches, saying where each completion came from
2086 matches_origin: ? list of the same length as matches, saying where each completion came from
2043 jedi_matches: list of Jedi matches, each with its own structure.
2087 jedi_matches: list of Jedi matches, each with its own structure.
2044 """
2088 """
2045
2089
2046
2090
2047 # if the cursor position isn't given, the only sane assumption we can
2091 # if the cursor position isn't given, the only sane assumption we can
2048 # make is that it's at the end of the line (the common case)
2092 # make is that it's at the end of the line (the common case)
2049 if cursor_pos is None:
2093 if cursor_pos is None:
2050 cursor_pos = len(line_buffer) if text is None else len(text)
2094 cursor_pos = len(line_buffer) if text is None else len(text)
2051
2095
2052 if self.use_main_ns:
2096 if self.use_main_ns:
2053 self.namespace = __main__.__dict__
2097 self.namespace = __main__.__dict__
2054
2098
2055 # if text is either None or an empty string, rely on the line buffer
2099 # if text is either None or an empty string, rely on the line buffer
2056 if (not line_buffer) and full_text:
2100 if (not line_buffer) and full_text:
2057 line_buffer = full_text.split('\n')[cursor_line]
2101 line_buffer = full_text.split('\n')[cursor_line]
2058 if not text: # issue #11508: check line_buffer before calling split_line
2102 if not text: # issue #11508: check line_buffer before calling split_line
2059 text = self.splitter.split_line(line_buffer, cursor_pos) if line_buffer else ''
2103 text = self.splitter.split_line(line_buffer, cursor_pos) if line_buffer else ''
2060
2104
2061 if self.backslash_combining_completions:
2105 if self.backslash_combining_completions:
2062 # allow deactivation of these on windows.
2106 # allow deactivation of these on windows.
2063 base_text = text if not line_buffer else line_buffer[:cursor_pos]
2107 base_text = text if not line_buffer else line_buffer[:cursor_pos]
2064
2108
2065 for meth in (self.latex_matches,
2109 for meth in (self.latex_matches,
2066 self.unicode_name_matches,
2110 self.unicode_name_matches,
2067 back_latex_name_matches,
2111 back_latex_name_matches,
2068 back_unicode_name_matches,
2112 back_unicode_name_matches,
2069 self.fwd_unicode_match):
2113 self.fwd_unicode_match):
2070 name_text, name_matches = meth(base_text)
2114 name_text, name_matches = meth(base_text)
2071 if name_text:
2115 if name_text:
2072 return _CompleteResult(name_text, name_matches[:MATCHES_LIMIT], \
2116 return _CompleteResult(name_text, name_matches[:MATCHES_LIMIT], \
2073 [meth.__qualname__]*min(len(name_matches), MATCHES_LIMIT), ())
2117 [meth.__qualname__]*min(len(name_matches), MATCHES_LIMIT), ())
2074
2118
2075
2119
2076 # If no line buffer is given, assume the input text is all there was
2120 # If no line buffer is given, assume the input text is all there was
2077 if line_buffer is None:
2121 if line_buffer is None:
2078 line_buffer = text
2122 line_buffer = text
2079
2123
2080 self.line_buffer = line_buffer
2124 self.line_buffer = line_buffer
2081 self.text_until_cursor = self.line_buffer[:cursor_pos]
2125 self.text_until_cursor = self.line_buffer[:cursor_pos]
2082
2126
2083 # Do magic arg matches
2127 # Do magic arg matches
2084 for matcher in self.magic_arg_matchers:
2128 for matcher in self.magic_arg_matchers:
2085 matches = list(matcher(line_buffer))[:MATCHES_LIMIT]
2129 matches = list(matcher(line_buffer))[:MATCHES_LIMIT]
2086 if matches:
2130 if matches:
2087 origins = [matcher.__qualname__] * len(matches)
2131 origins = [matcher.__qualname__] * len(matches)
2088 return _CompleteResult(text, matches, origins, ())
2132 return _CompleteResult(text, matches, origins, ())
2089
2133
2090 # Start with a clean slate of completions
2134 # Start with a clean slate of completions
2091 matches = []
2135 matches = []
2092
2136
2093 # FIXME: we should extend our api to return a dict with completions for
2137 # FIXME: we should extend our api to return a dict with completions for
2094 # different types of objects. The rlcomplete() method could then
2138 # different types of objects. The rlcomplete() method could then
2095 # simply collapse the dict into a list for readline, but we'd have
2139 # simply collapse the dict into a list for readline, but we'd have
2096 # richer completion semantics in other environments.
2140 # richer completion semantics in other environments.
2097 completions:Iterable[Any] = []
2141 completions:Iterable[Any] = []
2098 if self.use_jedi:
2142 if self.use_jedi:
2099 if not full_text:
2143 if not full_text:
2100 full_text = line_buffer
2144 full_text = line_buffer
2101 completions = self._jedi_matches(
2145 completions = self._jedi_matches(
2102 cursor_pos, cursor_line, full_text)
2146 cursor_pos, cursor_line, full_text)
2103
2147
2104 if self.merge_completions:
2148 if self.merge_completions:
2105 matches = []
2149 matches = []
2106 for matcher in self.matchers:
2150 for matcher in self.matchers:
2107 try:
2151 try:
2108 matches.extend([(m, matcher.__qualname__)
2152 matches.extend([(m, matcher.__qualname__)
2109 for m in matcher(text)])
2153 for m in matcher(text)])
2110 except:
2154 except:
2111 # Show the ugly traceback if the matcher causes an
2155 # Show the ugly traceback if the matcher causes an
2112 # exception, but do NOT crash the kernel!
2156 # exception, but do NOT crash the kernel!
2113 sys.excepthook(*sys.exc_info())
2157 sys.excepthook(*sys.exc_info())
2114 else:
2158 else:
2115 for matcher in self.matchers:
2159 for matcher in self.matchers:
2116 matches = [(m, matcher.__qualname__)
2160 matches = [(m, matcher.__qualname__)
2117 for m in matcher(text)]
2161 for m in matcher(text)]
2118 if matches:
2162 if matches:
2119 break
2163 break
2120
2164
2121 seen = set()
2165 seen = set()
2122 filtered_matches = set()
2166 filtered_matches = set()
2123 for m in matches:
2167 for m in matches:
2124 t, c = m
2168 t, c = m
2125 if t not in seen:
2169 if t not in seen:
2126 filtered_matches.add(m)
2170 filtered_matches.add(m)
2127 seen.add(t)
2171 seen.add(t)
2128
2172
2129 _filtered_matches = sorted(filtered_matches, key=lambda x: completions_sorting_key(x[0]))
2173 _filtered_matches = sorted(filtered_matches, key=lambda x: completions_sorting_key(x[0]))
2130
2174
2131 custom_res = [(m, 'custom') for m in self.dispatch_custom_completer(text) or []]
2175 custom_res = [(m, 'custom') for m in self.dispatch_custom_completer(text) or []]
2132
2176
2133 _filtered_matches = custom_res or _filtered_matches
2177 _filtered_matches = custom_res or _filtered_matches
2134
2178
2135 _filtered_matches = _filtered_matches[:MATCHES_LIMIT]
2179 _filtered_matches = _filtered_matches[:MATCHES_LIMIT]
2136 _matches = [m[0] for m in _filtered_matches]
2180 _matches = [m[0] for m in _filtered_matches]
2137 origins = [m[1] for m in _filtered_matches]
2181 origins = [m[1] for m in _filtered_matches]
2138
2182
2139 self.matches = _matches
2183 self.matches = _matches
2140
2184
2141 return _CompleteResult(text, _matches, origins, completions)
2185 return _CompleteResult(text, _matches, origins, completions)
2142
2186
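The keyword-only signature above is exercised directly by the quoted-filename tests below; the same call pattern, as a sketch (assumes a live IPython session):

.. code:: python

    ip = get_ipython()

    text = 'open("foo'
    matched_text, matches, origins, jedi_matches = ip.Completer._complete(
        cursor_line=0, cursor_pos=len(text), full_text=text
    )
    # origins records, for each match, the __qualname__ of the matcher
    # (or magic-arg matcher / backslash helper) that produced it.
    print(matched_text, matches, origins)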
2143 def fwd_unicode_match(self, text:str) -> Tuple[str, Sequence[str]]:
2187 def fwd_unicode_match(self, text:str) -> Tuple[str, Sequence[str]]:
2144 """
2188 """
2145
2189
2146 Forward match a string starting with a backslash, returning a list of
2190 Forward match a string starting with a backslash, returning a list of
2147 potential Unicode completions.
2191 potential Unicode completions.
2148
2192
2149 Will compute the list of Unicode character names on the first call and cache it.
2193 Will compute the list of Unicode character names on the first call and cache it.
2150
2194
2151 Return
2195 Return
2152 ======
2196 ======
2153
2197
2154 A tuple with:
2198 A tuple with:
2155 - matched text (empty if no matches)
2199 - matched text (empty if no matches)
2156 - list of potential completions (empty tuple if no matches)
2200 - list of potential completions (empty tuple if no matches)
2157 """
2201 """
2158 # TODO: self.unicode_names is here a list we traverse each time with ~100k elements.
2202 # TODO: self.unicode_names is here a list we traverse each time with ~100k elements.
2159 # We could do a faster match using a Trie.
2203 # We could do a faster match using a Trie.
2160
2204
2161 # Using pygtrie the following seems to work:
2205 # Using pygtrie the following seems to work:
2162
2206
2163 # s = PrefixSet()
2207 # s = PrefixSet()
2164
2208
2165 # for c in range(0,0x10FFFF + 1):
2209 # for c in range(0,0x10FFFF + 1):
2166 # try:
2210 # try:
2167 # s.add(unicodedata.name(chr(c)))
2211 # s.add(unicodedata.name(chr(c)))
2168 # except ValueError:
2212 # except ValueError:
2169 # pass
2213 # pass
2170 # [''.join(k) for k in s.iter(prefix)]
2214 # [''.join(k) for k in s.iter(prefix)]
2171
2215
2172 # But need to be timed and adds an extra dependency.
2216 # But need to be timed and adds an extra dependency.
2173
2217
2174 slashpos = text.rfind('\\')
2218 slashpos = text.rfind('\\')
2175 # if text contains a backslash
2219 # if text contains a backslash
2176 if slashpos > -1:
2220 if slashpos > -1:
2177 # PERF: It's important that we don't access self._unicode_names
2221 # PERF: It's important that we don't access self._unicode_names
2178 # until we're inside this if-block. _unicode_names is lazily
2222 # until we're inside this if-block. _unicode_names is lazily
2179 # initialized, and it takes a user-noticeable amount of time to
2223 # initialized, and it takes a user-noticeable amount of time to
2180 # initialize it, so we don't want to initialize it unless we're
2224 # initialize it, so we don't want to initialize it unless we're
2181 # actually going to use it.
2225 # actually going to use it.
2182 s = text[slashpos+1:]
2226 s = text[slashpos+1:]
2183 candidates = [x for x in self.unicode_names if x.startswith(s)]
2227 candidates = [x for x in self.unicode_names if x.startswith(s)]
2184 if candidates:
2228 if candidates:
2185 return s, candidates
2229 return s, candidates
2186 else:
2230 else:
2187 return '', ()
2231 return '', ()
2188
2232
2189 # if text does not contain a backslash
2233 # if text does not contain a backslash
2190 else:
2234 else:
2191 return '', ()
2235 return '', ()
2192
2236
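The TODO above suggests a trie; a dependency-free alternative with a similar effect is to sort the names once and binary-search the prefix. A sketch, not what the code above currently does:

.. code:: python

    import bisect

    def prefix_matches(sorted_names, prefix):
        """Return every name in the pre-sorted list that starts with `prefix`."""
        start = bisect.bisect_left(sorted_names, prefix)
        out = []
        for name in sorted_names[start:]:
            if not name.startswith(prefix):
                break
            out.append(name)
        return out

    # e.g. prefix_matches(sorted(completer.unicode_names), "GREEK SMALL LETTER")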
2193 @property
2237 @property
2194 def unicode_names(self) -> List[str]:
2238 def unicode_names(self) -> List[str]:
2195 """List of names of unicode code points that can be completed.
2239 """List of names of unicode code points that can be completed.
2196
2240
2197 The list is lazily initialized on first access.
2241 The list is lazily initialized on first access.
2198 """
2242 """
2199 if self._unicode_names is None:
2243 if self._unicode_names is None:
2200 names = []
2244 names = []
2201 for c in range(0,0x10FFFF + 1):
2245 for c in range(0,0x10FFFF + 1):
2202 try:
2246 try:
2203 names.append(unicodedata.name(chr(c)))
2247 names.append(unicodedata.name(chr(c)))
2204 except ValueError:
2248 except ValueError:
2205 pass
2249 pass
2206 self._unicode_names = _unicode_name_compute(_UNICODE_RANGES)
2250 self._unicode_names = _unicode_name_compute(_UNICODE_RANGES)
2207
2251
2208 return self._unicode_names
2252 return self._unicode_names
2209
2253
2210 def _unicode_name_compute(ranges:List[Tuple[int,int]]) -> List[str]:
2254 def _unicode_name_compute(ranges:List[Tuple[int,int]]) -> List[str]:
2211 names = []
2255 names = []
2212 for start,stop in ranges:
2256 for start,stop in ranges:
2213 for c in range(start, stop) :
2257 for c in range(start, stop) :
2214 try:
2258 try:
2215 names.append(unicodedata.name(chr(c)))
2259 names.append(unicodedata.name(chr(c)))
2216 except ValueError:
2260 except ValueError:
2217 pass
2261 pass
2218 return names
2262 return names
@@ -1,1177 +1,1278 b''
1 # encoding: utf-8
1 # encoding: utf-8
2 """Tests for the IPython tab-completion machinery."""
2 """Tests for the IPython tab-completion machinery."""
3
3
4 # Copyright (c) IPython Development Team.
4 # Copyright (c) IPython Development Team.
5 # Distributed under the terms of the Modified BSD License.
5 # Distributed under the terms of the Modified BSD License.
6
6
7 import os
7 import os
8 import sys
8 import sys
9 import textwrap
9 import textwrap
10 import unittest
10 import unittest
11
11
12 from contextlib import contextmanager
12 from contextlib import contextmanager
13
13
14 import nose.tools as nt
14 import nose.tools as nt
15
15
16 from traitlets.config.loader import Config
16 from traitlets.config.loader import Config
17 from IPython import get_ipython
17 from IPython import get_ipython
18 from IPython.core import completer
18 from IPython.core import completer
19 from IPython.external import decorators
19 from IPython.external import decorators
20 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
20 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
21 from IPython.utils.generics import complete_object
21 from IPython.utils.generics import complete_object
22 from IPython.testing import decorators as dec
22 from IPython.testing import decorators as dec
23
23
24 from IPython.core.completer import (
24 from IPython.core.completer import (
25 Completion,
25 Completion,
26 provisionalcompleter,
26 provisionalcompleter,
27 match_dict_keys,
27 match_dict_keys,
28 _deduplicate_completions,
28 _deduplicate_completions,
29 )
29 )
30 from nose.tools import assert_in, assert_not_in
30 from nose.tools import assert_in, assert_not_in
31
31
32 # -----------------------------------------------------------------------------
32 # -----------------------------------------------------------------------------
33 # Test functions
33 # Test functions
34 # -----------------------------------------------------------------------------
34 # -----------------------------------------------------------------------------
35
35
36 def recompute_unicode_ranges():
36 def recompute_unicode_ranges():
37 """
37 """
38 Utility to recompute the largest unicode range without any characters.
38 Utility to recompute the largest unicode range without any characters.
39
39
40 Used to recompute the gap in the global _UNICODE_RANGES of completer.py.
40 Used to recompute the gap in the global _UNICODE_RANGES of completer.py.
41 """
41 """
42 import itertools
42 import itertools
43 import unicodedata
43 import unicodedata
44 valid = []
44 valid = []
45 for c in range(0,0x10FFFF + 1):
45 for c in range(0,0x10FFFF + 1):
46 try:
46 try:
47 unicodedata.name(chr(c))
47 unicodedata.name(chr(c))
48 except ValueError:
48 except ValueError:
49 continue
49 continue
50 valid.append(c)
50 valid.append(c)
51
51
52 def ranges(i):
52 def ranges(i):
53 for a, b in itertools.groupby(enumerate(i), lambda pair: pair[1] - pair[0]):
53 for a, b in itertools.groupby(enumerate(i), lambda pair: pair[1] - pair[0]):
54 b = list(b)
54 b = list(b)
55 yield b[0][1], b[-1][1]
55 yield b[0][1], b[-1][1]
56
56
57 rg = list(ranges(valid))
57 rg = list(ranges(valid))
58 lens = []
58 lens = []
59 gap_lens = []
59 gap_lens = []
60 pstart, pstop = 0,0
60 pstart, pstop = 0,0
61 for start, stop in rg:
61 for start, stop in rg:
62 lens.append(stop-start)
62 lens.append(stop-start)
63 gap_lens.append((start - pstop, hex(pstop), hex(start), f'{round((start - pstop)/0xe01f0*100)}%'))
63 gap_lens.append((start - pstop, hex(pstop), hex(start), f'{round((start - pstop)/0xe01f0*100)}%'))
64 pstart, pstop = start, stop
64 pstart, pstop = start, stop
65
65
66 return sorted(gap_lens)[-1]
66 return sorted(gap_lens)[-1]
67
67
68
68
69
69
70 def test_unicode_range():
70 def test_unicode_range():
71 """
71 """
72 Test that the ranges we test for unicode names give the same number of
72 Test that the ranges we test for unicode names give the same number of
73 results as testing the full length.
73 results as testing the full length.
74 """
74 """
75 from IPython.core.completer import _unicode_name_compute, _UNICODE_RANGES
75 from IPython.core.completer import _unicode_name_compute, _UNICODE_RANGES
76
76
77 expected_list = _unicode_name_compute([(0, 0x110000)])
77 expected_list = _unicode_name_compute([(0, 0x110000)])
78 test = _unicode_name_compute(_UNICODE_RANGES)
78 test = _unicode_name_compute(_UNICODE_RANGES)
79 len_exp = len(expected_list)
79 len_exp = len(expected_list)
80 len_test = len(test)
80 len_test = len(test)
81
81
82 # do not inline the len() or on error pytest will try to print the 130 000 +
82 # do not inline the len() or on error pytest will try to print the 130 000 +
83 # elements.
83 # elements.
84 message = None
84 message = None
85 if len_exp != len_test or len_exp > 131808:
85 if len_exp != len_test or len_exp > 131808:
86 size, start, stop, prct = recompute_unicode_ranges()
86 size, start, stop, prct = recompute_unicode_ranges()
87 message = f"""_UNICODE_RANGES likely wrong and need updating. This is
87 message = f"""_UNICODE_RANGES likely wrong and need updating. This is
88 likely due to a new release of Python. We've find that the biggest gap
88 likely due to a new release of Python. We've find that the biggest gap
89 in unicode characters has reduces in size to be {size} charaters
89 in unicode characters has reduces in size to be {size} charaters
90 ({prct}), from {start}, to {stop}. In completer.py likely update to
90 ({prct}), from {start}, to {stop}. In completer.py likely update to
91
91
92 _UNICODE_RANGES = [(32, {start}), ({stop}, 0xe01f0)]
92 _UNICODE_RANGES = [(32, {start}), ({stop}, 0xe01f0)]
93
93
94 And update the assertion below to use
94 And update the assertion below to use
95
95
96 len_exp <= {len_exp}
96 len_exp <= {len_exp}
97 """
97 """
98 assert len_exp == len_test, message
98 assert len_exp == len_test, message
99
99
100 # fail if new unicode symbols have been added.
100 # fail if new unicode symbols have been added.
101 assert len_exp <= 137714, message
101 assert len_exp <= 137714, message
102
102
103
103
104 @contextmanager
104 @contextmanager
105 def greedy_completion():
105 def greedy_completion():
106 ip = get_ipython()
106 ip = get_ipython()
107 greedy_original = ip.Completer.greedy
107 greedy_original = ip.Completer.greedy
108 try:
108 try:
109 ip.Completer.greedy = True
109 ip.Completer.greedy = True
110 yield
110 yield
111 finally:
111 finally:
112 ip.Completer.greedy = greedy_original
112 ip.Completer.greedy = greedy_original
113
113
114
114
115 def test_protect_filename():
115 def test_protect_filename():
116 if sys.platform == "win32":
116 if sys.platform == "win32":
117 pairs = [
117 pairs = [
118 ("abc", "abc"),
118 ("abc", "abc"),
119 (" abc", '" abc"'),
119 (" abc", '" abc"'),
120 ("a bc", '"a bc"'),
120 ("a bc", '"a bc"'),
121 ("a bc", '"a bc"'),
121 ("a bc", '"a bc"'),
122 (" bc", '" bc"'),
122 (" bc", '" bc"'),
123 ]
123 ]
124 else:
124 else:
125 pairs = [
125 pairs = [
126 ("abc", "abc"),
126 ("abc", "abc"),
127 (" abc", r"\ abc"),
127 (" abc", r"\ abc"),
128 ("a bc", r"a\ bc"),
128 ("a bc", r"a\ bc"),
129 ("a bc", r"a\ \ bc"),
129 ("a bc", r"a\ \ bc"),
130 (" bc", r"\ \ bc"),
130 (" bc", r"\ \ bc"),
131 # On posix, we also protect parens and other special characters.
131 # On posix, we also protect parens and other special characters.
132 ("a(bc", r"a\(bc"),
132 ("a(bc", r"a\(bc"),
133 ("a)bc", r"a\)bc"),
133 ("a)bc", r"a\)bc"),
134 ("a( )bc", r"a\(\ \)bc"),
134 ("a( )bc", r"a\(\ \)bc"),
135 ("a[1]bc", r"a\[1\]bc"),
135 ("a[1]bc", r"a\[1\]bc"),
136 ("a{1}bc", r"a\{1\}bc"),
136 ("a{1}bc", r"a\{1\}bc"),
137 ("a#bc", r"a\#bc"),
137 ("a#bc", r"a\#bc"),
138 ("a?bc", r"a\?bc"),
138 ("a?bc", r"a\?bc"),
139 ("a=bc", r"a\=bc"),
139 ("a=bc", r"a\=bc"),
140 ("a\\bc", r"a\\bc"),
140 ("a\\bc", r"a\\bc"),
141 ("a|bc", r"a\|bc"),
141 ("a|bc", r"a\|bc"),
142 ("a;bc", r"a\;bc"),
142 ("a;bc", r"a\;bc"),
143 ("a:bc", r"a\:bc"),
143 ("a:bc", r"a\:bc"),
144 ("a'bc", r"a\'bc"),
144 ("a'bc", r"a\'bc"),
145 ("a*bc", r"a\*bc"),
145 ("a*bc", r"a\*bc"),
146 ('a"bc', r"a\"bc"),
146 ('a"bc', r"a\"bc"),
147 ("a^bc", r"a\^bc"),
147 ("a^bc", r"a\^bc"),
148 ("a&bc", r"a\&bc"),
148 ("a&bc", r"a\&bc"),
149 ]
149 ]
150 # run the actual tests
150 # run the actual tests
151 for s1, s2 in pairs:
151 for s1, s2 in pairs:
152 s1p = completer.protect_filename(s1)
152 s1p = completer.protect_filename(s1)
153 nt.assert_equal(s1p, s2)
153 nt.assert_equal(s1p, s2)
154
154
155
155
156 def check_line_split(splitter, test_specs):
156 def check_line_split(splitter, test_specs):
157 for part1, part2, split in test_specs:
157 for part1, part2, split in test_specs:
158 cursor_pos = len(part1)
158 cursor_pos = len(part1)
159 line = part1 + part2
159 line = part1 + part2
160 out = splitter.split_line(line, cursor_pos)
160 out = splitter.split_line(line, cursor_pos)
161 nt.assert_equal(out, split)
161 nt.assert_equal(out, split)
162
162
163
163
164 def test_line_split():
164 def test_line_split():
165 """Basic line splitter test with default specs."""
165 """Basic line splitter test with default specs."""
166 sp = completer.CompletionSplitter()
166 sp = completer.CompletionSplitter()
167 # The format of the test specs is: part1, part2, expected answer. Parts 1
167 # The format of the test specs is: part1, part2, expected answer. Parts 1
168 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
168 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
169 # was at the end of part1. So an empty part2 represents someone hitting
169 # was at the end of part1. So an empty part2 represents someone hitting
170 # tab at the end of the line, the most common case.
170 # tab at the end of the line, the most common case.
171 t = [
171 t = [
172 ("run some/scrip", "", "some/scrip"),
172 ("run some/scrip", "", "some/scrip"),
173 ("run scripts/er", "ror.py foo", "scripts/er"),
173 ("run scripts/er", "ror.py foo", "scripts/er"),
174 ("echo $HOM", "", "HOM"),
174 ("echo $HOM", "", "HOM"),
175 ("print sys.pa", "", "sys.pa"),
175 ("print sys.pa", "", "sys.pa"),
176 ("print(sys.pa", "", "sys.pa"),
176 ("print(sys.pa", "", "sys.pa"),
177 ("execfile('scripts/er", "", "scripts/er"),
177 ("execfile('scripts/er", "", "scripts/er"),
178 ("a[x.", "", "x."),
178 ("a[x.", "", "x."),
179 ("a[x.", "y", "x."),
179 ("a[x.", "y", "x."),
180 ('cd "some_file/', "", "some_file/"),
180 ('cd "some_file/', "", "some_file/"),
181 ]
181 ]
182 check_line_split(sp, t)
182 check_line_split(sp, t)
183 # Ensure splitting works OK with unicode by re-running the tests with
183 # Ensure splitting works OK with unicode by re-running the tests with
184 # all inputs turned into unicode
184 # all inputs turned into unicode
185 check_line_split(sp, [map(str, p) for p in t])
185 check_line_split(sp, [map(str, p) for p in t])
186
186
187
187
188 class NamedInstanceMetaclass(type):
188 class NamedInstanceMetaclass(type):
189 def __getitem__(cls, item):
189 def __getitem__(cls, item):
190 return cls.get_instance(item)
190 return cls.get_instance(item)
191
191
192
192
193 class NamedInstanceClass(metaclass=NamedInstanceMetaclass):
193 class NamedInstanceClass(metaclass=NamedInstanceMetaclass):
194 def __init__(self, name):
194 def __init__(self, name):
195 if not hasattr(self.__class__, "instances"):
195 if not hasattr(self.__class__, "instances"):
196 self.__class__.instances = {}
196 self.__class__.instances = {}
197 self.__class__.instances[name] = self
197 self.__class__.instances[name] = self
198
198
199 @classmethod
199 @classmethod
200 def _ipython_key_completions_(cls):
200 def _ipython_key_completions_(cls):
201 return cls.instances.keys()
201 return cls.instances.keys()
202
202
203 @classmethod
203 @classmethod
204 def get_instance(cls, name):
204 def get_instance(cls, name):
205 return cls.instances[name]
205 return cls.instances[name]
206
206
207
207
208 class KeyCompletable:
208 class KeyCompletable:
209 def __init__(self, things=()):
209 def __init__(self, things=()):
210 self.things = things
210 self.things = things
211
211
212 def _ipython_key_completions_(self):
212 def _ipython_key_completions_(self):
213 return list(self.things)
213 return list(self.things)
214
214
215
215
216 class TestCompleter(unittest.TestCase):
216 class TestCompleter(unittest.TestCase):
217 def setUp(self):
217 def setUp(self):
218 """
218 """
219 We want to silence all PendingDeprecationWarning when testing the completer
219 We want to silence all PendingDeprecationWarning when testing the completer
220 """
220 """
221 self._assertwarns = self.assertWarns(PendingDeprecationWarning)
221 self._assertwarns = self.assertWarns(PendingDeprecationWarning)
222 self._assertwarns.__enter__()
222 self._assertwarns.__enter__()
223
223
224 def tearDown(self):
224 def tearDown(self):
225 try:
225 try:
226 self._assertwarns.__exit__(None, None, None)
226 self._assertwarns.__exit__(None, None, None)
227 except AssertionError:
227 except AssertionError:
228 pass
228 pass
229
229
230 def test_custom_completion_error(self):
230 def test_custom_completion_error(self):
231 """Test that errors from custom attribute completers are silenced."""
231 """Test that errors from custom attribute completers are silenced."""
232 ip = get_ipython()
232 ip = get_ipython()
233
233
234 class A:
234 class A:
235 pass
235 pass
236
236
237 ip.user_ns["x"] = A()
237 ip.user_ns["x"] = A()
238
238
239 @complete_object.register(A)
239 @complete_object.register(A)
240 def complete_A(a, existing_completions):
240 def complete_A(a, existing_completions):
241 raise TypeError("this should be silenced")
241 raise TypeError("this should be silenced")
242
242
243 ip.complete("x.")
243 ip.complete("x.")
244
244
245 def test_custom_completion_ordering(self):
245 def test_custom_completion_ordering(self):
246 """Test that errors from custom attribute completers are silenced."""
246 """Test that errors from custom attribute completers are silenced."""
247 ip = get_ipython()
247 ip = get_ipython()
248
248
249 _, matches = ip.complete('in')
249 _, matches = ip.complete('in')
250 assert matches.index('input') < matches.index('int')
250 assert matches.index('input') < matches.index('int')
251
251
252 def complete_example(a):
252 def complete_example(a):
253 return ['example2', 'example1']
253 return ['example2', 'example1']
254
254
255 ip.Completer.custom_completers.add_re('ex*', complete_example)
255 ip.Completer.custom_completers.add_re('ex*', complete_example)
256 _, matches = ip.complete('ex')
256 _, matches = ip.complete('ex')
257 assert matches.index('example2') < matches.index('example1')
257 assert matches.index('example2') < matches.index('example1')
258
258
259 def test_unicode_completions(self):
259 def test_unicode_completions(self):
260 ip = get_ipython()
260 ip = get_ipython()
261 # Some strings that trigger different types of completion. Check them both
261 # Some strings that trigger different types of completion. Check them both
262 # in str and unicode forms
262 # in str and unicode forms
263 s = ["ru", "%ru", "cd /", "floa", "float(x)/"]
263 s = ["ru", "%ru", "cd /", "floa", "float(x)/"]
264 for t in s + list(map(str, s)):
264 for t in s + list(map(str, s)):
265 # We don't need to check exact completion values (they may change
265 # We don't need to check exact completion values (they may change
266 # depending on the state of the namespace), but at least no exceptions
266 # depending on the state of the namespace), but at least no exceptions
267 # should be thrown and the return value should be a pair of text, list
267 # should be thrown and the return value should be a pair of text, list
268 # values.
268 # values.
269 text, matches = ip.complete(t)
269 text, matches = ip.complete(t)
270 nt.assert_true(isinstance(text, str))
270 nt.assert_true(isinstance(text, str))
271 nt.assert_true(isinstance(matches, list))
271 nt.assert_true(isinstance(matches, list))
272
272
273 def test_latex_completions(self):
273 def test_latex_completions(self):
274 from IPython.core.latex_symbols import latex_symbols
274 from IPython.core.latex_symbols import latex_symbols
275 import random
275 import random
276
276
277 ip = get_ipython()
277 ip = get_ipython()
278 # Test some random unicode symbols
278 # Test some random unicode symbols
279 keys = random.sample(latex_symbols.keys(), 10)
279 keys = random.sample(latex_symbols.keys(), 10)
280 for k in keys:
280 for k in keys:
281 text, matches = ip.complete(k)
281 text, matches = ip.complete(k)
282 nt.assert_equal(text, k)
282 nt.assert_equal(text, k)
283 nt.assert_equal(matches, [latex_symbols[k]])
283 nt.assert_equal(matches, [latex_symbols[k]])
284 # Test a more complex line
284 # Test a more complex line
285 text, matches = ip.complete("print(\\alpha")
285 text, matches = ip.complete("print(\\alpha")
286 nt.assert_equal(text, "\\alpha")
286 nt.assert_equal(text, "\\alpha")
287 nt.assert_equal(matches[0], latex_symbols["\\alpha"])
287 nt.assert_equal(matches[0], latex_symbols["\\alpha"])
288 # Test multiple matching latex symbols
288 # Test multiple matching latex symbols
289 text, matches = ip.complete("\\al")
289 text, matches = ip.complete("\\al")
290 nt.assert_in("\\alpha", matches)
290 nt.assert_in("\\alpha", matches)
291 nt.assert_in("\\aleph", matches)
291 nt.assert_in("\\aleph", matches)
292
292
293 def test_latex_no_results(self):
293 def test_latex_no_results(self):
294 """
294 """
295 forward latex should really return nothing in either field if nothing is found.
295 forward latex should really return nothing in either field if nothing is found.
296 """
296 """
297 ip = get_ipython()
297 ip = get_ipython()
298 text, matches = ip.Completer.latex_matches("\\really_i_should_match_nothing")
298 text, matches = ip.Completer.latex_matches("\\really_i_should_match_nothing")
299 nt.assert_equal(text, "")
299 nt.assert_equal(text, "")
300 nt.assert_equal(matches, ())
300 nt.assert_equal(matches, ())
301
301
302 def test_back_latex_completion(self):
302 def test_back_latex_completion(self):
303 ip = get_ipython()
303 ip = get_ipython()
304
304
305 # do not return more than 1 match for \beta, only the latex one.
305 # do not return more than 1 match for \beta, only the latex one.
306 name, matches = ip.complete("\\Ξ²")
306 name, matches = ip.complete("\\Ξ²")
307 nt.assert_equal(matches, ['\\beta'])
307 nt.assert_equal(matches, ['\\beta'])
308
308
309 def test_back_unicode_completion(self):
309 def test_back_unicode_completion(self):
310 ip = get_ipython()
310 ip = get_ipython()
311
311
312 name, matches = ip.complete("\\β…€")
312 name, matches = ip.complete("\\β…€")
313 nt.assert_equal(matches, ("\\ROMAN NUMERAL FIVE",))
313 nt.assert_equal(matches, ("\\ROMAN NUMERAL FIVE",))
314
314
315 def test_forward_unicode_completion(self):
315 def test_forward_unicode_completion(self):
316 ip = get_ipython()
316 ip = get_ipython()
317
317
318 name, matches = ip.complete("\\ROMAN NUMERAL FIVE")
318 name, matches = ip.complete("\\ROMAN NUMERAL FIVE")
319 nt.assert_equal(matches, ["β…€"] ) # This is not a V
319 nt.assert_equal(matches, ["β…€"] ) # This is not a V
320 nt.assert_equal(matches, ["\u2164"] ) # same as above but explicit.
320 nt.assert_equal(matches, ["\u2164"] ) # same as above but explicit.
321
321
322 @nt.nottest # now we have a completion for \jmath
322 @nt.nottest # now we have a completion for \jmath
323 @decorators.knownfailureif(
323 @decorators.knownfailureif(
324 sys.platform == "win32", "Fails if there is a C:\\j... path"
324 sys.platform == "win32", "Fails if there is a C:\\j... path"
325 )
325 )
326 def test_no_ascii_back_completion(self):
326 def test_no_ascii_back_completion(self):
327 ip = get_ipython()
327 ip = get_ipython()
328 with TemporaryWorkingDirectory(): # Avoid any filename completions
328 with TemporaryWorkingDirectory(): # Avoid any filename completions
329 # single ascii letters that don't yet have completions
329 # single ascii letters that don't yet have completions
330 for letter in "jJ":
330 for letter in "jJ":
331 name, matches = ip.complete("\\" + letter)
331 name, matches = ip.complete("\\" + letter)
332 nt.assert_equal(matches, [])
332 nt.assert_equal(matches, [])
333
333
334 class CompletionSplitterTestCase(unittest.TestCase):
334 class CompletionSplitterTestCase(unittest.TestCase):
335 def setUp(self):
335 def setUp(self):
336 self.sp = completer.CompletionSplitter()
336 self.sp = completer.CompletionSplitter()
337
337
338 def test_delim_setting(self):
338 def test_delim_setting(self):
339 self.sp.delims = " "
339 self.sp.delims = " "
340 nt.assert_equal(self.sp.delims, " ")
340 nt.assert_equal(self.sp.delims, " ")
341 nt.assert_equal(self.sp._delim_expr, r"[\ ]")
341 nt.assert_equal(self.sp._delim_expr, r"[\ ]")
342
342
343 def test_spaces(self):
343 def test_spaces(self):
344 """Test with only spaces as split chars."""
344 """Test with only spaces as split chars."""
345 self.sp.delims = " "
345 self.sp.delims = " "
346 t = [("foo", "", "foo"), ("run foo", "", "foo"), ("run foo", "bar", "foo")]
346 t = [("foo", "", "foo"), ("run foo", "", "foo"), ("run foo", "bar", "foo")]
347 check_line_split(self.sp, t)
347 check_line_split(self.sp, t)
348
348
349 def test_has_open_quotes1(self):
349 def test_has_open_quotes1(self):
350 for s in ["'", "'''", "'hi' '"]:
350 for s in ["'", "'''", "'hi' '"]:
351 nt.assert_equal(completer.has_open_quotes(s), "'")
351 nt.assert_equal(completer.has_open_quotes(s), "'")
352
352
353 def test_has_open_quotes2(self):
353 def test_has_open_quotes2(self):
354 for s in ['"', '"""', '"hi" "']:
354 for s in ['"', '"""', '"hi" "']:
355 nt.assert_equal(completer.has_open_quotes(s), '"')
355 nt.assert_equal(completer.has_open_quotes(s), '"')
356
356
357 def test_has_open_quotes3(self):
357 def test_has_open_quotes3(self):
358 for s in ["''", "''' '''", "'hi' 'ipython'"]:
358 for s in ["''", "''' '''", "'hi' 'ipython'"]:
359 nt.assert_false(completer.has_open_quotes(s))
359 nt.assert_false(completer.has_open_quotes(s))
360
360
361 def test_has_open_quotes4(self):
361 def test_has_open_quotes4(self):
362 for s in ['""', '""" """', '"hi" "ipython"']:
362 for s in ['""', '""" """', '"hi" "ipython"']:
363 nt.assert_false(completer.has_open_quotes(s))
363 nt.assert_false(completer.has_open_quotes(s))
364
364
365 @decorators.knownfailureif(
365 @decorators.knownfailureif(
366 sys.platform == "win32", "abspath completions fail on Windows"
366 sys.platform == "win32", "abspath completions fail on Windows"
367 )
367 )
368 def test_abspath_file_completions(self):
368 def test_abspath_file_completions(self):
369 ip = get_ipython()
369 ip = get_ipython()
370 with TemporaryDirectory() as tmpdir:
370 with TemporaryDirectory() as tmpdir:
371 prefix = os.path.join(tmpdir, "foo")
371 prefix = os.path.join(tmpdir, "foo")
372 suffixes = ["1", "2"]
372 suffixes = ["1", "2"]
373 names = [prefix + s for s in suffixes]
373 names = [prefix + s for s in suffixes]
374 for n in names:
374 for n in names:
375 open(n, "w").close()
375 open(n, "w").close()
376
376
377 # Check simple completion
377 # Check simple completion
378 c = ip.complete(prefix)[1]
378 c = ip.complete(prefix)[1]
379 nt.assert_equal(c, names)
379 nt.assert_equal(c, names)
380
380
381 # Now check with a function call
381 # Now check with a function call
382 cmd = 'a = f("%s' % prefix
382 cmd = 'a = f("%s' % prefix
383 c = ip.complete(prefix, cmd)[1]
383 c = ip.complete(prefix, cmd)[1]
384 comp = [prefix + s for s in suffixes]
384 comp = [prefix + s for s in suffixes]
385 nt.assert_equal(c, comp)
385 nt.assert_equal(c, comp)
386
386
387 def test_local_file_completions(self):
387 def test_local_file_completions(self):
388 ip = get_ipython()
388 ip = get_ipython()
389 with TemporaryWorkingDirectory():
389 with TemporaryWorkingDirectory():
390 prefix = "./foo"
390 prefix = "./foo"
391 suffixes = ["1", "2"]
391 suffixes = ["1", "2"]
392 names = [prefix + s for s in suffixes]
392 names = [prefix + s for s in suffixes]
393 for n in names:
393 for n in names:
394 open(n, "w").close()
394 open(n, "w").close()
395
395
396 # Check simple completion
396 # Check simple completion
397 c = ip.complete(prefix)[1]
397 c = ip.complete(prefix)[1]
398 nt.assert_equal(c, names)
398 nt.assert_equal(c, names)
399
399
400 # Now check with a function call
400 # Now check with a function call
401 cmd = 'a = f("%s' % prefix
401 cmd = 'a = f("%s' % prefix
402 c = ip.complete(prefix, cmd)[1]
402 c = ip.complete(prefix, cmd)[1]
403 comp = {prefix + s for s in suffixes}
403 comp = {prefix + s for s in suffixes}
404 nt.assert_true(comp.issubset(set(c)))
404 nt.assert_true(comp.issubset(set(c)))
405
405
406 def test_quoted_file_completions(self):
406 def test_quoted_file_completions(self):
407 ip = get_ipython()
407 ip = get_ipython()
408 with TemporaryWorkingDirectory():
408 with TemporaryWorkingDirectory():
409 name = "foo'bar"
409 name = "foo'bar"
410 open(name, "w").close()
410 open(name, "w").close()
411
411
412 # Don't escape Windows
412 # Don't escape Windows
413 escaped = name if sys.platform == "win32" else "foo\\'bar"
413 escaped = name if sys.platform == "win32" else "foo\\'bar"
414
414
415 # Single quote matches embedded single quote
415 # Single quote matches embedded single quote
416 text = "open('foo"
416 text = "open('foo"
417 c = ip.Completer._complete(
417 c = ip.Completer._complete(
418 cursor_line=0, cursor_pos=len(text), full_text=text
418 cursor_line=0, cursor_pos=len(text), full_text=text
419 )[1]
419 )[1]
420 nt.assert_equal(c, [escaped])
420 nt.assert_equal(c, [escaped])
421
421
422 # Double quote requires no escape
422 # Double quote requires no escape
423 text = 'open("foo'
423 text = 'open("foo'
424 c = ip.Completer._complete(
424 c = ip.Completer._complete(
425 cursor_line=0, cursor_pos=len(text), full_text=text
425 cursor_line=0, cursor_pos=len(text), full_text=text
426 )[1]
426 )[1]
427 nt.assert_equal(c, [name])
427 nt.assert_equal(c, [name])
428
428
429 # No quote requires an escape
429 # No quote requires an escape
430 text = "%ls foo"
430 text = "%ls foo"
431 c = ip.Completer._complete(
431 c = ip.Completer._complete(
432 cursor_line=0, cursor_pos=len(text), full_text=text
432 cursor_line=0, cursor_pos=len(text), full_text=text
433 )[1]
433 )[1]
434 nt.assert_equal(c, [escaped])
434 nt.assert_equal(c, [escaped])
435
435
436 def test_all_completions_dups(self):
436 def test_all_completions_dups(self):
437 """
437 """
438 Make sure the output of `IPCompleter.all_completions` does not have
438 Make sure the output of `IPCompleter.all_completions` does not have
439 duplicated prefixes.
439 duplicated prefixes.
440 """
440 """
441 ip = get_ipython()
441 ip = get_ipython()
442 c = ip.Completer
442 c = ip.Completer
443 ip.ex("class TestClass():\n\ta=1\n\ta1=2")
443 ip.ex("class TestClass():\n\ta=1\n\ta1=2")
444 for jedi_status in [True, False]:
444 for jedi_status in [True, False]:
445 with provisionalcompleter():
445 with provisionalcompleter():
446 ip.Completer.use_jedi = jedi_status
446 ip.Completer.use_jedi = jedi_status
447 matches = c.all_completions("TestCl")
447 matches = c.all_completions("TestCl")
448 assert matches == ['TestClass'], jedi_status
448 assert matches == ['TestClass'], jedi_status
449 matches = c.all_completions("TestClass.")
449 matches = c.all_completions("TestClass.")
450 assert len(matches) > 2, jedi_status
450 assert len(matches) > 2, jedi_status
451 matches = c.all_completions("TestClass.a")
451 matches = c.all_completions("TestClass.a")
452 assert matches == ['TestClass.a', 'TestClass.a1'], jedi_status
452 assert matches == ['TestClass.a', 'TestClass.a1'], jedi_status
453
453
454 def test_jedi(self):
454 def test_jedi(self):
455 """
455 """
456 A couple of issues we had with Jedi.
456 A couple of issues we had with Jedi.
457 """
457 """
458 ip = get_ipython()
458 ip = get_ipython()
459
459
460 def _test_complete(reason, s, comp, start=None, end=None):
460 def _test_complete(reason, s, comp, start=None, end=None):
461 l = len(s)
461 l = len(s)
462 start = start if start is not None else l
462 start = start if start is not None else l
463 end = end if end is not None else l
463 end = end if end is not None else l
464 with provisionalcompleter():
464 with provisionalcompleter():
465 ip.Completer.use_jedi = True
465 ip.Completer.use_jedi = True
466 completions = set(ip.Completer.completions(s, l))
466 completions = set(ip.Completer.completions(s, l))
467 ip.Completer.use_jedi = False
467 ip.Completer.use_jedi = False
468 assert_in(Completion(start, end, comp), completions, reason)
468 assert_in(Completion(start, end, comp), completions, reason)
469
469
470 def _test_not_complete(reason, s, comp):
470 def _test_not_complete(reason, s, comp):
471 l = len(s)
471 l = len(s)
472 with provisionalcompleter():
472 with provisionalcompleter():
473 ip.Completer.use_jedi = True
473 ip.Completer.use_jedi = True
474 completions = set(ip.Completer.completions(s, l))
474 completions = set(ip.Completer.completions(s, l))
475 ip.Completer.use_jedi = False
475 ip.Completer.use_jedi = False
476 assert_not_in(Completion(l, l, comp), completions, reason)
476 assert_not_in(Completion(l, l, comp), completions, reason)
477
477
478 import jedi
478 import jedi
479
479
480 jedi_version = tuple(int(i) for i in jedi.__version__.split(".")[:3])
480 jedi_version = tuple(int(i) for i in jedi.__version__.split(".")[:3])
481 if jedi_version > (0, 10):
481 if jedi_version > (0, 10):
482 yield _test_complete, "jedi >0.9 should complete and not crash", "a=1;a.", "real"
482 yield _test_complete, "jedi >0.9 should complete and not crash", "a=1;a.", "real"
483 yield _test_complete, "can infer first argument", 'a=(1,"foo");a[0].', "real"
483 yield _test_complete, "can infer first argument", 'a=(1,"foo");a[0].', "real"
484 yield _test_complete, "can infer second argument", 'a=(1,"foo");a[1].', "capitalize"
484 yield _test_complete, "can infer second argument", 'a=(1,"foo");a[1].', "capitalize"
485 yield _test_complete, "cover duplicate completions", "im", "import", 0, 2
485 yield _test_complete, "cover duplicate completions", "im", "import", 0, 2
486
486
487 yield _test_not_complete, "does not mix types", 'a=(1,"foo");a[0].', "capitalize"
487 yield _test_not_complete, "does not mix types", 'a=(1,"foo");a[0].', "capitalize"
488
488
489 def test_completion_have_signature(self):
489 def test_completion_have_signature(self):
490 """
490 """
491 Let's make sure Jedi is capable of pulling out the signature of the function we are completing.
491 Let's make sure Jedi is capable of pulling out the signature of the function we are completing.
492 """
492 """
493 ip = get_ipython()
493 ip = get_ipython()
494 with provisionalcompleter():
494 with provisionalcompleter():
495 ip.Completer.use_jedi = True
495 ip.Completer.use_jedi = True
496 completions = ip.Completer.completions("ope", 3)
496 completions = ip.Completer.completions("ope", 3)
497 c = next(completions) # should be `open`
497 c = next(completions) # should be `open`
498 ip.Completer.use_jedi = False
498 ip.Completer.use_jedi = False
499 assert "file" in c.signature, "Signature of function was not found by completer"
499 assert "file" in c.signature, "Signature of function was not found by completer"
500 assert (
500 assert (
501 "encoding" in c.signature
501 "encoding" in c.signature
502 ), "Signature of function was not found by completer"
502 ), "Signature of function was not found by completer"
503
503
504 def test_deduplicate_completions(self):
504 def test_deduplicate_completions(self):
505 """
505 """
506 Test that completions are correctly deduplicated (even if ranges are not the same)
506 Test that completions are correctly deduplicated (even if ranges are not the same)
507 """
507 """
508 ip = get_ipython()
508 ip = get_ipython()
509 ip.ex(
509 ip.ex(
510 textwrap.dedent(
510 textwrap.dedent(
511 """
511 """
512 class Z:
512 class Z:
513 zoo = 1
513 zoo = 1
514 """
514 """
515 )
515 )
516 )
516 )
517 with provisionalcompleter():
517 with provisionalcompleter():
518 ip.Completer.use_jedi = True
518 ip.Completer.use_jedi = True
519 l = list(
519 l = list(
520 _deduplicate_completions("Z.z", ip.Completer.completions("Z.z", 3))
520 _deduplicate_completions("Z.z", ip.Completer.completions("Z.z", 3))
521 )
521 )
522 ip.Completer.use_jedi = False
522 ip.Completer.use_jedi = False
523
523
524 assert len(l) == 1, "Completions (Z.z<tab>) correctly deduplicate: %s " % l
524 assert len(l) == 1, "Completions (Z.z<tab>) correctly deduplicate: %s " % l
525 assert l[0].text == "zoo" # and not `it.accumulate`
525 assert l[0].text == "zoo" # and not `it.accumulate`
526
526
527 def test_greedy_completions(self):
527 def test_greedy_completions(self):
528 """
528 """
529 Test the capability of the Greedy completer.
529 Test the capability of the Greedy completer.
530
530
531 Most of the tests here do not really show off the greedy completer; as proof,
531 Most of the tests here do not really show off the greedy completer; as proof,
532 each of the texts below now passes with Jedi alone. The greedy completer is capable of more.
532 each of the texts below now passes with Jedi alone. The greedy completer is capable of more.
533
533
534 See the :any:`test_dict_key_completion_contexts`
534 See the :any:`test_dict_key_completion_contexts`
535
535
536 """
536 """
537 ip = get_ipython()
537 ip = get_ipython()
538 ip.ex("a=list(range(5))")
538 ip.ex("a=list(range(5))")
539 _, c = ip.complete(".", line="a[0].")
539 _, c = ip.complete(".", line="a[0].")
540 nt.assert_false(".real" in c, "Shouldn't have completed on a[0]: %s" % c)
540 nt.assert_false(".real" in c, "Shouldn't have completed on a[0]: %s" % c)
541
541
542 def _(line, cursor_pos, expect, message, completion):
542 def _(line, cursor_pos, expect, message, completion):
543 with greedy_completion(), provisionalcompleter():
543 with greedy_completion(), provisionalcompleter():
544 ip.Completer.use_jedi = False
544 ip.Completer.use_jedi = False
545 _, c = ip.complete(".", line=line, cursor_pos=cursor_pos)
545 _, c = ip.complete(".", line=line, cursor_pos=cursor_pos)
546 nt.assert_in(expect, c, message % c)
546 nt.assert_in(expect, c, message % c)
547
547
548 ip.Completer.use_jedi = True
548 ip.Completer.use_jedi = True
549 with provisionalcompleter():
549 with provisionalcompleter():
550 completions = ip.Completer.completions(line, cursor_pos)
550 completions = ip.Completer.completions(line, cursor_pos)
551 nt.assert_in(completion, completions)
551 nt.assert_in(completion, completions)
552
552
553 with provisionalcompleter():
553 with provisionalcompleter():
554 yield _, "a[0].", 5, "a[0].real", "Should have completed on a[0].: %s", Completion(
554 yield _, "a[0].", 5, "a[0].real", "Should have completed on a[0].: %s", Completion(
555 5, 5, "real"
555 5, 5, "real"
556 )
556 )
557 yield _, "a[0].r", 6, "a[0].real", "Should have completed on a[0].r: %s", Completion(
557 yield _, "a[0].r", 6, "a[0].real", "Should have completed on a[0].r: %s", Completion(
558 5, 6, "real"
558 5, 6, "real"
559 )
559 )
560
560
561 yield _, "a[0].from_", 10, "a[0].from_bytes", "Should have completed on a[0].from_: %s", Completion(
561 yield _, "a[0].from_", 10, "a[0].from_bytes", "Should have completed on a[0].from_: %s", Completion(
562 5, 10, "from_bytes"
562 5, 10, "from_bytes"
563 )
563 )
564
564
565 def test_omit__names(self):
565 def test_omit__names(self):
566 # also happens to test IPCompleter as a configurable
566 # also happens to test IPCompleter as a configurable
567 ip = get_ipython()
567 ip = get_ipython()
568 ip._hidden_attr = 1
568 ip._hidden_attr = 1
569 ip._x = {}
569 ip._x = {}
570 c = ip.Completer
570 c = ip.Completer
571 ip.ex("ip=get_ipython()")
571 ip.ex("ip=get_ipython()")
572 cfg = Config()
572 cfg = Config()
573 cfg.IPCompleter.omit__names = 0
573 cfg.IPCompleter.omit__names = 0
574 c.update_config(cfg)
574 c.update_config(cfg)
575 with provisionalcompleter():
575 with provisionalcompleter():
576 c.use_jedi = False
576 c.use_jedi = False
577 s, matches = c.complete("ip.")
577 s, matches = c.complete("ip.")
578 nt.assert_in("ip.__str__", matches)
578 nt.assert_in("ip.__str__", matches)
579 nt.assert_in("ip._hidden_attr", matches)
579 nt.assert_in("ip._hidden_attr", matches)
580
580
581 # c.use_jedi = True
581 # c.use_jedi = True
582 # completions = set(c.completions('ip.', 3))
582 # completions = set(c.completions('ip.', 3))
583 # nt.assert_in(Completion(3, 3, '__str__'), completions)
583 # nt.assert_in(Completion(3, 3, '__str__'), completions)
584 # nt.assert_in(Completion(3,3, "_hidden_attr"), completions)
584 # nt.assert_in(Completion(3,3, "_hidden_attr"), completions)
585
585
586 cfg = Config()
586 cfg = Config()
587 cfg.IPCompleter.omit__names = 1
587 cfg.IPCompleter.omit__names = 1
588 c.update_config(cfg)
588 c.update_config(cfg)
589 with provisionalcompleter():
589 with provisionalcompleter():
590 c.use_jedi = False
590 c.use_jedi = False
591 s, matches = c.complete("ip.")
591 s, matches = c.complete("ip.")
592 nt.assert_not_in("ip.__str__", matches)
592 nt.assert_not_in("ip.__str__", matches)
593 # nt.assert_in('ip._hidden_attr', matches)
593 # nt.assert_in('ip._hidden_attr', matches)
594
594
595 # c.use_jedi = True
595 # c.use_jedi = True
596 # completions = set(c.completions('ip.', 3))
596 # completions = set(c.completions('ip.', 3))
597 # nt.assert_not_in(Completion(3,3,'__str__'), completions)
597 # nt.assert_not_in(Completion(3,3,'__str__'), completions)
598 # nt.assert_in(Completion(3,3, "_hidden_attr"), completions)
598 # nt.assert_in(Completion(3,3, "_hidden_attr"), completions)
599
599
600 cfg = Config()
600 cfg = Config()
601 cfg.IPCompleter.omit__names = 2
601 cfg.IPCompleter.omit__names = 2
602 c.update_config(cfg)
602 c.update_config(cfg)
603 with provisionalcompleter():
603 with provisionalcompleter():
604 c.use_jedi = False
604 c.use_jedi = False
605 s, matches = c.complete("ip.")
605 s, matches = c.complete("ip.")
606 nt.assert_not_in("ip.__str__", matches)
606 nt.assert_not_in("ip.__str__", matches)
607 nt.assert_not_in("ip._hidden_attr", matches)
607 nt.assert_not_in("ip._hidden_attr", matches)
608
608
609 # c.use_jedi = True
609 # c.use_jedi = True
610 # completions = set(c.completions('ip.', 3))
610 # completions = set(c.completions('ip.', 3))
611 # nt.assert_not_in(Completion(3,3,'__str__'), completions)
611 # nt.assert_not_in(Completion(3,3,'__str__'), completions)
612 # nt.assert_not_in(Completion(3,3, "_hidden_attr"), completions)
612 # nt.assert_not_in(Completion(3,3, "_hidden_attr"), completions)
613
613
614 with provisionalcompleter():
614 with provisionalcompleter():
615 c.use_jedi = False
615 c.use_jedi = False
616 s, matches = c.complete("ip._x.")
616 s, matches = c.complete("ip._x.")
617 nt.assert_in("ip._x.keys", matches)
617 nt.assert_in("ip._x.keys", matches)
618
618
619 # c.use_jedi = True
619 # c.use_jedi = True
620 # completions = set(c.completions('ip._x.', 6))
620 # completions = set(c.completions('ip._x.', 6))
621 # nt.assert_in(Completion(6,6, "keys"), completions)
621 # nt.assert_in(Completion(6,6, "keys"), completions)
622
622
623 del ip._hidden_attr
623 del ip._hidden_attr
624 del ip._x
624 del ip._x
625
625
626 def test_limit_to__all__False_ok(self):
626 def test_limit_to__all__False_ok(self):
627 """
627 """
628 `limit_to__all__` is deprecated; once we remove it, this test can go away.
628 `limit_to__all__` is deprecated; once we remove it, this test can go away.
629 """
629 """
630 ip = get_ipython()
630 ip = get_ipython()
631 c = ip.Completer
631 c = ip.Completer
632 c.use_jedi = False
632 c.use_jedi = False
633 ip.ex("class D: x=24")
633 ip.ex("class D: x=24")
634 ip.ex("d=D()")
634 ip.ex("d=D()")
635 cfg = Config()
635 cfg = Config()
636 cfg.IPCompleter.limit_to__all__ = False
636 cfg.IPCompleter.limit_to__all__ = False
637 c.update_config(cfg)
637 c.update_config(cfg)
638 s, matches = c.complete("d.")
638 s, matches = c.complete("d.")
639 nt.assert_in("d.x", matches)
639 nt.assert_in("d.x", matches)
640
640
641 def test_get__all__entries_ok(self):
641 def test_get__all__entries_ok(self):
642 class A:
642 class A:
643 __all__ = ["x", 1]
643 __all__ = ["x", 1]
644
644
645 words = completer.get__all__entries(A())
645 words = completer.get__all__entries(A())
646 nt.assert_equal(words, ["x"])
646 nt.assert_equal(words, ["x"])
647
647
648 def test_get__all__entries_no__all__ok(self):
648 def test_get__all__entries_no__all__ok(self):
649 class A:
649 class A:
650 pass
650 pass
651
651
652 words = completer.get__all__entries(A())
652 words = completer.get__all__entries(A())
653 nt.assert_equal(words, [])
653 nt.assert_equal(words, [])
654
654
655 def test_func_kw_completions(self):
655 def test_func_kw_completions(self):
656 ip = get_ipython()
656 ip = get_ipython()
657 c = ip.Completer
657 c = ip.Completer
658 c.use_jedi = False
658 c.use_jedi = False
659 ip.ex("def myfunc(a=1,b=2): return a+b")
659 ip.ex("def myfunc(a=1,b=2): return a+b")
660 s, matches = c.complete(None, "myfunc(1,b")
660 s, matches = c.complete(None, "myfunc(1,b")
661 nt.assert_in("b=", matches)
661 nt.assert_in("b=", matches)
662 # Simulate completing with cursor right after b (pos==10):
662 # Simulate completing with cursor right after b (pos==10):
663 s, matches = c.complete(None, "myfunc(1,b)", 10)
663 s, matches = c.complete(None, "myfunc(1,b)", 10)
664 nt.assert_in("b=", matches)
664 nt.assert_in("b=", matches)
665 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
665 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
666 nt.assert_in("b=", matches)
666 nt.assert_in("b=", matches)
667 # builtin function
667 # builtin function
668 s, matches = c.complete(None, "min(k, k")
668 s, matches = c.complete(None, "min(k, k")
669 nt.assert_in("key=", matches)
669 nt.assert_in("key=", matches)
670
670
671 def test_default_arguments_from_docstring(self):
671 def test_default_arguments_from_docstring(self):
672 ip = get_ipython()
672 ip = get_ipython()
673 c = ip.Completer
673 c = ip.Completer
674 kwd = c._default_arguments_from_docstring("min(iterable[, key=func]) -> value")
674 kwd = c._default_arguments_from_docstring("min(iterable[, key=func]) -> value")
675 nt.assert_equal(kwd, ["key"])
675 nt.assert_equal(kwd, ["key"])
676 # with cython type etc
676 # with cython type etc
677 kwd = c._default_arguments_from_docstring(
677 kwd = c._default_arguments_from_docstring(
678 "Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n"
678 "Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n"
679 )
679 )
680 nt.assert_equal(kwd, ["ncall", "resume", "nsplit"])
680 nt.assert_equal(kwd, ["ncall", "resume", "nsplit"])
681 # white spaces
681 # white spaces
682 kwd = c._default_arguments_from_docstring(
682 kwd = c._default_arguments_from_docstring(
683 "\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n"
683 "\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n"
684 )
684 )
685 nt.assert_equal(kwd, ["ncall", "resume", "nsplit"])
685 nt.assert_equal(kwd, ["ncall", "resume", "nsplit"])
686
686
687 def test_line_magics(self):
687 def test_line_magics(self):
688 ip = get_ipython()
688 ip = get_ipython()
689 c = ip.Completer
689 c = ip.Completer
690 s, matches = c.complete(None, "lsmag")
690 s, matches = c.complete(None, "lsmag")
691 nt.assert_in("%lsmagic", matches)
691 nt.assert_in("%lsmagic", matches)
692 s, matches = c.complete(None, "%lsmag")
692 s, matches = c.complete(None, "%lsmag")
693 nt.assert_in("%lsmagic", matches)
693 nt.assert_in("%lsmagic", matches)
694
694
695 def test_cell_magics(self):
695 def test_cell_magics(self):
696 from IPython.core.magic import register_cell_magic
696 from IPython.core.magic import register_cell_magic
697
697
698 @register_cell_magic
698 @register_cell_magic
699 def _foo_cellm(line, cell):
699 def _foo_cellm(line, cell):
700 pass
700 pass
701
701
702 ip = get_ipython()
702 ip = get_ipython()
703 c = ip.Completer
703 c = ip.Completer
704
704
705 s, matches = c.complete(None, "_foo_ce")
705 s, matches = c.complete(None, "_foo_ce")
706 nt.assert_in("%%_foo_cellm", matches)
706 nt.assert_in("%%_foo_cellm", matches)
707 s, matches = c.complete(None, "%%_foo_ce")
707 s, matches = c.complete(None, "%%_foo_ce")
708 nt.assert_in("%%_foo_cellm", matches)
708 nt.assert_in("%%_foo_cellm", matches)
709
709
710 def test_line_cell_magics(self):
710 def test_line_cell_magics(self):
711 from IPython.core.magic import register_line_cell_magic
711 from IPython.core.magic import register_line_cell_magic
712
712
713 @register_line_cell_magic
713 @register_line_cell_magic
714 def _bar_cellm(line, cell):
714 def _bar_cellm(line, cell):
715 pass
715 pass
716
716
717 ip = get_ipython()
717 ip = get_ipython()
718 c = ip.Completer
718 c = ip.Completer
719
719
720 # The policy here is trickier, see comments in completion code. The
720 # The policy here is trickier, see comments in completion code. The
721 # returned values depend on whether the user passes %% or not explicitly,
721 # returned values depend on whether the user passes %% or not explicitly,
722 # and this will show a difference if the same name is both a line and cell
722 # and this will show a difference if the same name is both a line and cell
723 # magic.
723 # magic.
724 s, matches = c.complete(None, "_bar_ce")
724 s, matches = c.complete(None, "_bar_ce")
725 nt.assert_in("%_bar_cellm", matches)
725 nt.assert_in("%_bar_cellm", matches)
726 nt.assert_in("%%_bar_cellm", matches)
726 nt.assert_in("%%_bar_cellm", matches)
727 s, matches = c.complete(None, "%_bar_ce")
727 s, matches = c.complete(None, "%_bar_ce")
728 nt.assert_in("%_bar_cellm", matches)
728 nt.assert_in("%_bar_cellm", matches)
729 nt.assert_in("%%_bar_cellm", matches)
729 nt.assert_in("%%_bar_cellm", matches)
730 s, matches = c.complete(None, "%%_bar_ce")
730 s, matches = c.complete(None, "%%_bar_ce")
731 nt.assert_not_in("%_bar_cellm", matches)
731 nt.assert_not_in("%_bar_cellm", matches)
732 nt.assert_in("%%_bar_cellm", matches)
732 nt.assert_in("%%_bar_cellm", matches)
733
733
734 def test_magic_completion_order(self):
734 def test_magic_completion_order(self):
735 ip = get_ipython()
735 ip = get_ipython()
736 c = ip.Completer
736 c = ip.Completer
737
737
738 # Test ordering of line and cell magics.
738 # Test ordering of line and cell magics.
739 text, matches = c.complete("timeit")
739 text, matches = c.complete("timeit")
740 nt.assert_equal(matches, ["%timeit", "%%timeit"])
740 nt.assert_equal(matches, ["%timeit", "%%timeit"])
741
741
742 def test_magic_completion_shadowing(self):
742 def test_magic_completion_shadowing(self):
743 ip = get_ipython()
743 ip = get_ipython()
744 c = ip.Completer
744 c = ip.Completer
745 c.use_jedi = False
745 c.use_jedi = False
746
746
747 # Before importing matplotlib, %matplotlib magic should be the only option.
747 # Before importing matplotlib, %matplotlib magic should be the only option.
748 text, matches = c.complete("mat")
748 text, matches = c.complete("mat")
749 nt.assert_equal(matches, ["%matplotlib"])
749 nt.assert_equal(matches, ["%matplotlib"])
750
750
751 # The newly introduced name should shadow the magic.
751 # The newly introduced name should shadow the magic.
752 ip.run_cell("matplotlib = 1")
752 ip.run_cell("matplotlib = 1")
753 text, matches = c.complete("mat")
753 text, matches = c.complete("mat")
754 nt.assert_equal(matches, ["matplotlib"])
754 nt.assert_equal(matches, ["matplotlib"])
755
755
756 # After removing matplotlib from namespace, the magic should again be
756 # After removing matplotlib from namespace, the magic should again be
757 # the only option.
757 # the only option.
758 del ip.user_ns["matplotlib"]
758 del ip.user_ns["matplotlib"]
759 text, matches = c.complete("mat")
759 text, matches = c.complete("mat")
760 nt.assert_equal(matches, ["%matplotlib"])
760 nt.assert_equal(matches, ["%matplotlib"])
761
761
762 def test_magic_completion_shadowing_explicit(self):
762 def test_magic_completion_shadowing_explicit(self):
763 """
763 """
764 If the user tries to complete a shadowed magic, an explicit % prefix should
764 If the user tries to complete a shadowed magic, an explicit % prefix should
765 still return the magic completions.
765 still return the magic completions.
766 """
766 """
767 ip = get_ipython()
767 ip = get_ipython()
768 c = ip.Completer
768 c = ip.Completer
769
769
770 # Before importing matplotlib, %matplotlib magic should be the only option.
770 # Before importing matplotlib, %matplotlib magic should be the only option.
771 text, matches = c.complete("%mat")
771 text, matches = c.complete("%mat")
772 nt.assert_equal(matches, ["%matplotlib"])
772 nt.assert_equal(matches, ["%matplotlib"])
773
773
774 ip.run_cell("matplotlib = 1")
774 ip.run_cell("matplotlib = 1")
775
775
776 # Even with matplotlib bound in the user namespace, the explicit % prefix
776 # Even with matplotlib bound in the user namespace, the explicit % prefix
777 # should still complete to the magic.
777 # should still complete to the magic.
778 text, matches = c.complete("%mat")
778 text, matches = c.complete("%mat")
779 nt.assert_equal(matches, ["%matplotlib"])
779 nt.assert_equal(matches, ["%matplotlib"])
780
780
781 def test_magic_config(self):
781 def test_magic_config(self):
782 ip = get_ipython()
782 ip = get_ipython()
783 c = ip.Completer
783 c = ip.Completer
784
784
785 s, matches = c.complete(None, "conf")
785 s, matches = c.complete(None, "conf")
786 nt.assert_in("%config", matches)
786 nt.assert_in("%config", matches)
787 s, matches = c.complete(None, "conf")
787 s, matches = c.complete(None, "conf")
788 nt.assert_not_in("AliasManager", matches)
788 nt.assert_not_in("AliasManager", matches)
789 s, matches = c.complete(None, "config ")
789 s, matches = c.complete(None, "config ")
790 nt.assert_in("AliasManager", matches)
790 nt.assert_in("AliasManager", matches)
791 s, matches = c.complete(None, "%config ")
791 s, matches = c.complete(None, "%config ")
792 nt.assert_in("AliasManager", matches)
792 nt.assert_in("AliasManager", matches)
793 s, matches = c.complete(None, "config Ali")
793 s, matches = c.complete(None, "config Ali")
794 nt.assert_list_equal(["AliasManager"], matches)
794 nt.assert_list_equal(["AliasManager"], matches)
795 s, matches = c.complete(None, "%config Ali")
795 s, matches = c.complete(None, "%config Ali")
796 nt.assert_list_equal(["AliasManager"], matches)
796 nt.assert_list_equal(["AliasManager"], matches)
797 s, matches = c.complete(None, "config AliasManager")
797 s, matches = c.complete(None, "config AliasManager")
798 nt.assert_list_equal(["AliasManager"], matches)
798 nt.assert_list_equal(["AliasManager"], matches)
799 s, matches = c.complete(None, "%config AliasManager")
799 s, matches = c.complete(None, "%config AliasManager")
800 nt.assert_list_equal(["AliasManager"], matches)
800 nt.assert_list_equal(["AliasManager"], matches)
801 s, matches = c.complete(None, "config AliasManager.")
801 s, matches = c.complete(None, "config AliasManager.")
802 nt.assert_in("AliasManager.default_aliases", matches)
802 nt.assert_in("AliasManager.default_aliases", matches)
803 s, matches = c.complete(None, "%config AliasManager.")
803 s, matches = c.complete(None, "%config AliasManager.")
804 nt.assert_in("AliasManager.default_aliases", matches)
804 nt.assert_in("AliasManager.default_aliases", matches)
805 s, matches = c.complete(None, "config AliasManager.de")
805 s, matches = c.complete(None, "config AliasManager.de")
806 nt.assert_list_equal(["AliasManager.default_aliases"], matches)
806 nt.assert_list_equal(["AliasManager.default_aliases"], matches)
807 s, matches = c.complete(None, "config AliasManager.de")
807 s, matches = c.complete(None, "config AliasManager.de")
808 nt.assert_list_equal(["AliasManager.default_aliases"], matches)
808 nt.assert_list_equal(["AliasManager.default_aliases"], matches)
809
809
810 def test_magic_color(self):
810 def test_magic_color(self):
811 ip = get_ipython()
811 ip = get_ipython()
812 c = ip.Completer
812 c = ip.Completer
813
813
814 s, matches = c.complete(None, "colo")
814 s, matches = c.complete(None, "colo")
815 nt.assert_in("%colors", matches)
815 nt.assert_in("%colors", matches)
816 s, matches = c.complete(None, "colo")
816 s, matches = c.complete(None, "colo")
817 nt.assert_not_in("NoColor", matches)
817 nt.assert_not_in("NoColor", matches)
818 s, matches = c.complete(None, "%colors") # No trailing space
818 s, matches = c.complete(None, "%colors") # No trailing space
819 nt.assert_not_in("NoColor", matches)
819 nt.assert_not_in("NoColor", matches)
820 s, matches = c.complete(None, "colors ")
820 s, matches = c.complete(None, "colors ")
821 nt.assert_in("NoColor", matches)
821 nt.assert_in("NoColor", matches)
822 s, matches = c.complete(None, "%colors ")
822 s, matches = c.complete(None, "%colors ")
823 nt.assert_in("NoColor", matches)
823 nt.assert_in("NoColor", matches)
824 s, matches = c.complete(None, "colors NoCo")
824 s, matches = c.complete(None, "colors NoCo")
825 nt.assert_list_equal(["NoColor"], matches)
825 nt.assert_list_equal(["NoColor"], matches)
826 s, matches = c.complete(None, "%colors NoCo")
826 s, matches = c.complete(None, "%colors NoCo")
827 nt.assert_list_equal(["NoColor"], matches)
827 nt.assert_list_equal(["NoColor"], matches)
828
828
829 def test_match_dict_keys(self):
829 def test_match_dict_keys(self):
830 """
830 """
831 Test that match_dict_keys works on a couple of use cases, returns what is
831 Test that match_dict_keys works on a couple of use cases, returns what is
832 expected, and does not crash.
832 expected, and does not crash.
833 """
833 """
834 delims = " \t\n`!@#$^&*()=+[{]}\\|;:'\",<>?"
834 delims = " \t\n`!@#$^&*()=+[{]}\\|;:'\",<>?"
835
835
836 keys = ["foo", b"far"]
836 keys = ["foo", b"far"]
837 assert match_dict_keys(keys, "b'", delims=delims) == ("'", 2, ["far"])
837 assert match_dict_keys(keys, "b'", delims=delims) == ("'", 2, ["far"])
838 assert match_dict_keys(keys, "b'f", delims=delims) == ("'", 2, ["far"])
838 assert match_dict_keys(keys, "b'f", delims=delims) == ("'", 2, ["far"])
839 assert match_dict_keys(keys, 'b"', delims=delims) == ('"', 2, ["far"])
839 assert match_dict_keys(keys, 'b"', delims=delims) == ('"', 2, ["far"])
840 assert match_dict_keys(keys, 'b"f', delims=delims) == ('"', 2, ["far"])
840 assert match_dict_keys(keys, 'b"f', delims=delims) == ('"', 2, ["far"])
841
841
842 assert match_dict_keys(keys, "'", delims=delims) == ("'", 1, ["foo"])
842 assert match_dict_keys(keys, "'", delims=delims) == ("'", 1, ["foo"])
843 assert match_dict_keys(keys, "'f", delims=delims) == ("'", 1, ["foo"])
843 assert match_dict_keys(keys, "'f", delims=delims) == ("'", 1, ["foo"])
844 assert match_dict_keys(keys, '"', delims=delims) == ('"', 1, ["foo"])
844 assert match_dict_keys(keys, '"', delims=delims) == ('"', 1, ["foo"])
845 assert match_dict_keys(keys, '"f', delims=delims) == ('"', 1, ["foo"])
845 assert match_dict_keys(keys, '"f', delims=delims) == ('"', 1, ["foo"])
846
846
847 match_dict_keys
847 match_dict_keys
848
848
849 def test_match_dict_keys_tuple(self):
850 """
851 Test that match_dict_keys called with an extra prefix works on a couple of use cases,
852 returns what is expected, and does not crash.
853 """
854 delims = " \t\n`!@#$^&*()=+[{]}\\|;:'\",<>?"
855
856 keys = [("foo", "bar"), ("foo", "oof"), ("foo", b"bar"), ('other', 'test')]
857
858 # Completion on first key == "foo"
859 assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("foo",)) == ("'", 1, ["bar", "oof"])
860 assert match_dict_keys(keys, "\"", delims=delims, extra_prefix=("foo",)) == ("\"", 1, ["bar", "oof"])
861 assert match_dict_keys(keys, "'o", delims=delims, extra_prefix=("foo",)) == ("'", 1, ["oof"])
862 assert match_dict_keys(keys, "\"o", delims=delims, extra_prefix=("foo",)) == ("\"", 1, ["oof"])
863 assert match_dict_keys(keys, "b'", delims=delims, extra_prefix=("foo",)) == ("'", 2, ["bar"])
864 assert match_dict_keys(keys, "b\"", delims=delims, extra_prefix=("foo",)) == ("\"", 2, ["bar"])
865 assert match_dict_keys(keys, "b'b", delims=delims, extra_prefix=("foo",)) == ("'", 2, ["bar"])
866 assert match_dict_keys(keys, "b\"b", delims=delims, extra_prefix=("foo",)) == ("\"", 2, ["bar"])
867
868 # No Completion
869 assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("no_foo",)) == ("'", 1, [])
870 assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("fo",)) == ("'", 1, [])
871
872 keys = [('foo1', 'foo2', 'foo3', 'foo4'), ('foo1', 'foo2', 'bar', 'foo4')]
873 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1',)) == ("'", 1, ["foo2", "foo2"])
874 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2')) == ("'", 1, ["foo3"])
875 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2', 'foo3')) == ("'", 1, ["foo4"])
876 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2', 'foo3', 'foo4')) == ("'", 1, [])
877
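As a quick orientation before the dictionary-key completion tests below, here is a minimal sketch of the user-facing behaviour the new tuple-key matching enables. It assumes an interactive IPython session; the dictionary `d` and its keys are made-up example values, and the call goes through the same `ip.Completer.complete` path the tests exercise:

    ip = get_ipython()
    ip.user_ns["d"] = {("foo", "bar"): None, ("foo", "oof"): None}

    # Completing the second element of a tuple key: only entries whose first
    # element is "foo" are considered, and only those matching the typed "b".
    _, matches = ip.Completer.complete(line_buffer="d['foo', 'b")
    assert "bar" in matches      # proposed: matches the "b" prefix
    assert "oof" not in matches  # filtered out: does not match the "b" prefix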
849 def test_dict_key_completion_string(self):
878 def test_dict_key_completion_string(self):
850 """Test dictionary key completion for string keys"""
879 """Test dictionary key completion for string keys"""
851 ip = get_ipython()
880 ip = get_ipython()
852 complete = ip.Completer.complete
881 complete = ip.Completer.complete
853
882
854 ip.user_ns["d"] = {"abc": None}
883 ip.user_ns["d"] = {"abc": None}
855
884
856 # check completion at different stages
885 # check completion at different stages
857 _, matches = complete(line_buffer="d[")
886 _, matches = complete(line_buffer="d[")
858 nt.assert_in("'abc'", matches)
887 nt.assert_in("'abc'", matches)
859 nt.assert_not_in("'abc']", matches)
888 nt.assert_not_in("'abc']", matches)
860
889
861 _, matches = complete(line_buffer="d['")
890 _, matches = complete(line_buffer="d['")
862 nt.assert_in("abc", matches)
891 nt.assert_in("abc", matches)
863 nt.assert_not_in("abc']", matches)
892 nt.assert_not_in("abc']", matches)
864
893
865 _, matches = complete(line_buffer="d['a")
894 _, matches = complete(line_buffer="d['a")
866 nt.assert_in("abc", matches)
895 nt.assert_in("abc", matches)
867 nt.assert_not_in("abc']", matches)
896 nt.assert_not_in("abc']", matches)
868
897
869 # check use of different quoting
898 # check use of different quoting
870 _, matches = complete(line_buffer='d["')
899 _, matches = complete(line_buffer='d["')
871 nt.assert_in("abc", matches)
900 nt.assert_in("abc", matches)
872 nt.assert_not_in('abc"]', matches)
901 nt.assert_not_in('abc"]', matches)
873
902
874 _, matches = complete(line_buffer='d["a')
903 _, matches = complete(line_buffer='d["a')
875 nt.assert_in("abc", matches)
904 nt.assert_in("abc", matches)
876 nt.assert_not_in('abc"]', matches)
905 nt.assert_not_in('abc"]', matches)
877
906
878 # check sensitivity to following context
907 # check sensitivity to following context
879 _, matches = complete(line_buffer="d[]", cursor_pos=2)
908 _, matches = complete(line_buffer="d[]", cursor_pos=2)
880 nt.assert_in("'abc'", matches)
909 nt.assert_in("'abc'", matches)
881
910
882 _, matches = complete(line_buffer="d['']", cursor_pos=3)
911 _, matches = complete(line_buffer="d['']", cursor_pos=3)
883 nt.assert_in("abc", matches)
912 nt.assert_in("abc", matches)
884 nt.assert_not_in("abc'", matches)
913 nt.assert_not_in("abc'", matches)
885 nt.assert_not_in("abc']", matches)
914 nt.assert_not_in("abc']", matches)
886
915
887 # check multiple solutions are correctly returned and that noise is not
916 # check multiple solutions are correctly returned and that noise is not
888 ip.user_ns["d"] = {
917 ip.user_ns["d"] = {
889 "abc": None,
918 "abc": None,
890 "abd": None,
919 "abd": None,
891 "bad": None,
920 "bad": None,
892 object(): None,
921 object(): None,
893 5: None,
922 5: None,
923 ("abe", None): None,
924 (None, "abf"): None
894 }
925 }
895
926
896 _, matches = complete(line_buffer="d['a")
927 _, matches = complete(line_buffer="d['a")
897 nt.assert_in("abc", matches)
928 nt.assert_in("abc", matches)
898 nt.assert_in("abd", matches)
929 nt.assert_in("abd", matches)
899 nt.assert_not_in("bad", matches)
930 nt.assert_not_in("bad", matches)
931 nt.assert_not_in("abe", matches)
932 nt.assert_not_in("abf", matches)
900 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
933 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
901
934
902 # check escaping and whitespace
935 # check escaping and whitespace
903 ip.user_ns["d"] = {"a\nb": None, "a'b": None, 'a"b': None, "a word": None}
936 ip.user_ns["d"] = {"a\nb": None, "a'b": None, 'a"b': None, "a word": None}
904 _, matches = complete(line_buffer="d['a")
937 _, matches = complete(line_buffer="d['a")
905 nt.assert_in("a\\nb", matches)
938 nt.assert_in("a\\nb", matches)
906 nt.assert_in("a\\'b", matches)
939 nt.assert_in("a\\'b", matches)
907 nt.assert_in('a"b', matches)
940 nt.assert_in('a"b', matches)
908 nt.assert_in("a word", matches)
941 nt.assert_in("a word", matches)
909 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
942 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
910
943
911 # - can complete on non-initial word of the string
944 # - can complete on non-initial word of the string
912 _, matches = complete(line_buffer="d['a w")
945 _, matches = complete(line_buffer="d['a w")
913 nt.assert_in("word", matches)
946 nt.assert_in("word", matches)
914
947
915 # - understands quote escaping
948 # - understands quote escaping
916 _, matches = complete(line_buffer="d['a\\'")
949 _, matches = complete(line_buffer="d['a\\'")
917 nt.assert_in("b", matches)
950 nt.assert_in("b", matches)
918
951
919 # - default quoting should work like repr
952 # - default quoting should work like repr
920 _, matches = complete(line_buffer="d[")
953 _, matches = complete(line_buffer="d[")
921 nt.assert_in('"a\'b"', matches)
954 nt.assert_in('"a\'b"', matches)
922
955
923 # - when opening quote with ", possible to match with unescaped apostrophe
956 # - when opening quote with ", possible to match with unescaped apostrophe
924 _, matches = complete(line_buffer="d[\"a'")
957 _, matches = complete(line_buffer="d[\"a'")
925 nt.assert_in("b", matches)
958 nt.assert_in("b", matches)
926
959
927 # need to not split at delims that readline won't split at
960 # need to not split at delims that readline won't split at
928 if "-" not in ip.Completer.splitter.delims:
961 if "-" not in ip.Completer.splitter.delims:
929 ip.user_ns["d"] = {"before-after": None}
962 ip.user_ns["d"] = {"before-after": None}
930 _, matches = complete(line_buffer="d['before-af")
963 _, matches = complete(line_buffer="d['before-af")
931 nt.assert_in("before-after", matches)
964 nt.assert_in("before-after", matches)
932
965
966 # check completion on tuple-of-string keys at different stage - on first key
967 ip.user_ns["d"] = {('foo', 'bar'): None}
968 _, matches = complete(line_buffer="d[")
969 nt.assert_in("'foo'", matches)
970 nt.assert_not_in("'foo']", matches)
971 nt.assert_not_in("'bar'", matches)
972 nt.assert_not_in("foo", matches)
973 nt.assert_not_in("bar", matches)
974
975 # - match the prefix
976 _, matches = complete(line_buffer="d['f")
977 nt.assert_in("foo", matches)
978 nt.assert_not_in("foo']", matches)
979 nt.assert_not_in("foo\"]", matches)
980 _, matches = complete(line_buffer="d['foo")
981 nt.assert_in("foo", matches)
982
983 # - can complete on second key
984 _, matches = complete(line_buffer="d['foo', ")
985 nt.assert_in("'bar'", matches)
986 _, matches = complete(line_buffer="d['foo', 'b")
987 nt.assert_in("bar", matches)
988 nt.assert_not_in("foo", matches)
989
990 # - does not propose missing keys
991 _, matches = complete(line_buffer="d['foo', 'f")
992 nt.assert_not_in("bar", matches)
993 nt.assert_not_in("foo", matches)
994
995 # check sensitivity to following context
996 _, matches = complete(line_buffer="d['foo',]", cursor_pos=8)
997 nt.assert_in("'bar'", matches)
998 nt.assert_not_in("bar", matches)
999 nt.assert_not_in("'foo'", matches)
1000 nt.assert_not_in("foo", matches)
1001
1002 _, matches = complete(line_buffer="d['']", cursor_pos=3)
1003 nt.assert_in("foo", matches)
1004 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
1005
1006 _, matches = complete(line_buffer='d[""]', cursor_pos=3)
1007 nt.assert_in("foo", matches)
1008 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
1009
1010 _, matches = complete(line_buffer='d["foo","]', cursor_pos=9)
1011 nt.assert_in("bar", matches)
1012 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
1013
1014 _, matches = complete(line_buffer='d["foo",]', cursor_pos=8)
1015 nt.assert_in("'bar'", matches)
1016 nt.assert_not_in("bar", matches)
1017
1018 # Can complete with longer tuple keys
1019 ip.user_ns["d"] = {('foo', 'bar', 'foobar'): None}
1020
1021 # - can complete second key
1022 _, matches = complete(line_buffer="d['foo', 'b")
1023 nt.assert_in('bar', matches)
1024 nt.assert_not_in('foo', matches)
1025 nt.assert_not_in('foobar', matches)
1026
1027 # - can complete third key
1028 _, matches = complete(line_buffer="d['foo', 'bar', 'fo")
1029 nt.assert_in('foobar', matches)
1030 nt.assert_not_in('foo', matches)
1031 nt.assert_not_in('bar', matches)
1032
1033
933 def test_dict_key_completion_contexts(self):
1034 def test_dict_key_completion_contexts(self):
934 """Test expression contexts in which dict key completion occurs"""
1035 """Test expression contexts in which dict key completion occurs"""
935 ip = get_ipython()
1036 ip = get_ipython()
936 complete = ip.Completer.complete
1037 complete = ip.Completer.complete
937 d = {"abc": None}
1038 d = {"abc": None}
938 ip.user_ns["d"] = d
1039 ip.user_ns["d"] = d
939
1040
940 class C:
1041 class C:
941 data = d
1042 data = d
942
1043
943 ip.user_ns["C"] = C
1044 ip.user_ns["C"] = C
944 ip.user_ns["get"] = lambda: d
1045 ip.user_ns["get"] = lambda: d
945
1046
946 def assert_no_completion(**kwargs):
1047 def assert_no_completion(**kwargs):
947 _, matches = complete(**kwargs)
1048 _, matches = complete(**kwargs)
948 nt.assert_not_in("abc", matches)
1049 nt.assert_not_in("abc", matches)
949 nt.assert_not_in("abc'", matches)
1050 nt.assert_not_in("abc'", matches)
950 nt.assert_not_in("abc']", matches)
1051 nt.assert_not_in("abc']", matches)
951 nt.assert_not_in("'abc'", matches)
1052 nt.assert_not_in("'abc'", matches)
952 nt.assert_not_in("'abc']", matches)
1053 nt.assert_not_in("'abc']", matches)
953
1054
954 def assert_completion(**kwargs):
1055 def assert_completion(**kwargs):
955 _, matches = complete(**kwargs)
1056 _, matches = complete(**kwargs)
956 nt.assert_in("'abc'", matches)
1057 nt.assert_in("'abc'", matches)
957 nt.assert_not_in("'abc']", matches)
1058 nt.assert_not_in("'abc']", matches)
958
1059
959 # no completion after string closed, even if reopened
1060 # no completion after string closed, even if reopened
960 assert_no_completion(line_buffer="d['a'")
1061 assert_no_completion(line_buffer="d['a'")
961 assert_no_completion(line_buffer='d["a"')
1062 assert_no_completion(line_buffer='d["a"')
962 assert_no_completion(line_buffer="d['a' + ")
1063 assert_no_completion(line_buffer="d['a' + ")
963 assert_no_completion(line_buffer="d['a' + '")
1064 assert_no_completion(line_buffer="d['a' + '")
964
1065
965 # completion in non-trivial expressions
1066 # completion in non-trivial expressions
966 assert_completion(line_buffer="+ d[")
1067 assert_completion(line_buffer="+ d[")
967 assert_completion(line_buffer="(d[")
1068 assert_completion(line_buffer="(d[")
968 assert_completion(line_buffer="C.data[")
1069 assert_completion(line_buffer="C.data[")
969
1070
970 # greedy flag
1071 # greedy flag
971 def assert_completion(**kwargs):
1072 def assert_completion(**kwargs):
972 _, matches = complete(**kwargs)
1073 _, matches = complete(**kwargs)
973 nt.assert_in("get()['abc']", matches)
1074 nt.assert_in("get()['abc']", matches)
974
1075
975 assert_no_completion(line_buffer="get()[")
1076 assert_no_completion(line_buffer="get()[")
976 with greedy_completion():
1077 with greedy_completion():
977 assert_completion(line_buffer="get()[")
1078 assert_completion(line_buffer="get()[")
978 assert_completion(line_buffer="get()['")
1079 assert_completion(line_buffer="get()['")
979 assert_completion(line_buffer="get()['a")
1080 assert_completion(line_buffer="get()['a")
980 assert_completion(line_buffer="get()['ab")
1081 assert_completion(line_buffer="get()['ab")
981 assert_completion(line_buffer="get()['abc")
1082 assert_completion(line_buffer="get()['abc")
982
1083
983 def test_dict_key_completion_bytes(self):
1084 def test_dict_key_completion_bytes(self):
984 """Test handling of bytes in dict key completion"""
1085 """Test handling of bytes in dict key completion"""
985 ip = get_ipython()
1086 ip = get_ipython()
986 complete = ip.Completer.complete
1087 complete = ip.Completer.complete
987
1088
988 ip.user_ns["d"] = {"abc": None, b"abd": None}
1089 ip.user_ns["d"] = {"abc": None, b"abd": None}
989
1090
990 _, matches = complete(line_buffer="d[")
1091 _, matches = complete(line_buffer="d[")
991 nt.assert_in("'abc'", matches)
1092 nt.assert_in("'abc'", matches)
992 nt.assert_in("b'abd'", matches)
1093 nt.assert_in("b'abd'", matches)
993
1094
994 if False: # not currently implemented
1095 if False: # not currently implemented
995 _, matches = complete(line_buffer="d[b")
1096 _, matches = complete(line_buffer="d[b")
996 nt.assert_in("b'abd'", matches)
1097 nt.assert_in("b'abd'", matches)
997 nt.assert_not_in("b'abc'", matches)
1098 nt.assert_not_in("b'abc'", matches)
998
1099
999 _, matches = complete(line_buffer="d[b'")
1100 _, matches = complete(line_buffer="d[b'")
1000 nt.assert_in("abd", matches)
1101 nt.assert_in("abd", matches)
1001 nt.assert_not_in("abc", matches)
1102 nt.assert_not_in("abc", matches)
1002
1103
1003 _, matches = complete(line_buffer="d[B'")
1104 _, matches = complete(line_buffer="d[B'")
1004 nt.assert_in("abd", matches)
1105 nt.assert_in("abd", matches)
1005 nt.assert_not_in("abc", matches)
1106 nt.assert_not_in("abc", matches)
1006
1107
1007 _, matches = complete(line_buffer="d['")
1108 _, matches = complete(line_buffer="d['")
1008 nt.assert_in("abc", matches)
1109 nt.assert_in("abc", matches)
1009 nt.assert_not_in("abd", matches)
1110 nt.assert_not_in("abd", matches)
1010
1111
1011 def test_dict_key_completion_unicode_py3(self):
1112 def test_dict_key_completion_unicode_py3(self):
1012 """Test handling of unicode in dict key completion"""
1113 """Test handling of unicode in dict key completion"""
1013 ip = get_ipython()
1114 ip = get_ipython()
1014 complete = ip.Completer.complete
1115 complete = ip.Completer.complete
1015
1116
1016 ip.user_ns["d"] = {"a\u05d0": None}
1117 ip.user_ns["d"] = {"a\u05d0": None}
1017
1118
1018 # query using escape
1119 # query using escape
1019 if sys.platform != "win32":
1120 if sys.platform != "win32":
1020 # Known failure on Windows
1121 # Known failure on Windows
1021 _, matches = complete(line_buffer="d['a\\u05d0")
1122 _, matches = complete(line_buffer="d['a\\u05d0")
1022 nt.assert_in("u05d0", matches) # tokenized after \\
1123 nt.assert_in("u05d0", matches) # tokenized after \\
1023
1124
1024 # query using character
1125 # query using character
1025 _, matches = complete(line_buffer="d['a\u05d0")
1126 _, matches = complete(line_buffer="d['a\u05d0")
1026 nt.assert_in("a\u05d0", matches)
1127 nt.assert_in("a\u05d0", matches)
1027
1128
1028 with greedy_completion():
1129 with greedy_completion():
1029 # query using escape
1130 # query using escape
1030 _, matches = complete(line_buffer="d['a\\u05d0")
1131 _, matches = complete(line_buffer="d['a\\u05d0")
1031 nt.assert_in("d['a\\u05d0']", matches) # tokenized after \\
1132 nt.assert_in("d['a\\u05d0']", matches) # tokenized after \\
1032
1133
1033 # query using character
1134 # query using character
1034 _, matches = complete(line_buffer="d['a\u05d0")
1135 _, matches = complete(line_buffer="d['a\u05d0")
1035 nt.assert_in("d['a\u05d0']", matches)
1136 nt.assert_in("d['a\u05d0']", matches)
1036
1137
1037 @dec.skip_without("numpy")
1138 @dec.skip_without("numpy")
1038 def test_struct_array_key_completion(self):
1139 def test_struct_array_key_completion(self):
1039 """Test dict key completion applies to numpy struct arrays"""
1140 """Test dict key completion applies to numpy struct arrays"""
1040 import numpy
1141 import numpy
1041
1142
1042 ip = get_ipython()
1143 ip = get_ipython()
1043 complete = ip.Completer.complete
1144 complete = ip.Completer.complete
1044 ip.user_ns["d"] = numpy.array([], dtype=[("hello", "f"), ("world", "f")])
1145 ip.user_ns["d"] = numpy.array([], dtype=[("hello", "f"), ("world", "f")])
1045 _, matches = complete(line_buffer="d['")
1146 _, matches = complete(line_buffer="d['")
1046 nt.assert_in("hello", matches)
1147 nt.assert_in("hello", matches)
1047 nt.assert_in("world", matches)
1148 nt.assert_in("world", matches)
1048 # complete on the numpy struct itself
1149 # complete on the numpy struct itself
1049 dt = numpy.dtype(
1150 dt = numpy.dtype(
1050 [("my_head", [("my_dt", ">u4"), ("my_df", ">u4")]), ("my_data", ">f4", 5)]
1151 [("my_head", [("my_dt", ">u4"), ("my_df", ">u4")]), ("my_data", ">f4", 5)]
1051 )
1152 )
1052 x = numpy.zeros(2, dtype=dt)
1153 x = numpy.zeros(2, dtype=dt)
1053 ip.user_ns["d"] = x[1]
1154 ip.user_ns["d"] = x[1]
1054 _, matches = complete(line_buffer="d['")
1155 _, matches = complete(line_buffer="d['")
1055 nt.assert_in("my_head", matches)
1156 nt.assert_in("my_head", matches)
1056 nt.assert_in("my_data", matches)
1157 nt.assert_in("my_data", matches)
1057 # complete on a nested level
1158 # complete on a nested level
1058 with greedy_completion():
1159 with greedy_completion():
1059 ip.user_ns["d"] = numpy.zeros(2, dtype=dt)
1160 ip.user_ns["d"] = numpy.zeros(2, dtype=dt)
1060 _, matches = complete(line_buffer="d[1]['my_head']['")
1161 _, matches = complete(line_buffer="d[1]['my_head']['")
1061 nt.assert_true(any(["my_dt" in m for m in matches]))
1162 nt.assert_true(any(["my_dt" in m for m in matches]))
1062 nt.assert_true(any(["my_df" in m for m in matches]))
1163 nt.assert_true(any(["my_df" in m for m in matches]))
1063
1164
1064 @dec.skip_without("pandas")
1165 @dec.skip_without("pandas")
1065 def test_dataframe_key_completion(self):
1166 def test_dataframe_key_completion(self):
1066 """Test dict key completion applies to pandas DataFrames"""
1167 """Test dict key completion applies to pandas DataFrames"""
1067 import pandas
1168 import pandas
1068
1169
1069 ip = get_ipython()
1170 ip = get_ipython()
1070 complete = ip.Completer.complete
1171 complete = ip.Completer.complete
1071 ip.user_ns["d"] = pandas.DataFrame({"hello": [1], "world": [2]})
1172 ip.user_ns["d"] = pandas.DataFrame({"hello": [1], "world": [2]})
1072 _, matches = complete(line_buffer="d['")
1173 _, matches = complete(line_buffer="d['")
1073 nt.assert_in("hello", matches)
1174 nt.assert_in("hello", matches)
1074 nt.assert_in("world", matches)
1175 nt.assert_in("world", matches)
1075
1176
1076 def test_dict_key_completion_invalids(self):
1177 def test_dict_key_completion_invalids(self):
1077 """Smoke test cases dict key completion can't handle"""
1178 """Smoke test cases dict key completion can't handle"""
1078 ip = get_ipython()
1179 ip = get_ipython()
1079 complete = ip.Completer.complete
1180 complete = ip.Completer.complete
1080
1181
1081 ip.user_ns["no_getitem"] = None
1182 ip.user_ns["no_getitem"] = None
1082 ip.user_ns["no_keys"] = []
1183 ip.user_ns["no_keys"] = []
1083 ip.user_ns["cant_call_keys"] = dict
1184 ip.user_ns["cant_call_keys"] = dict
1084 ip.user_ns["empty"] = {}
1185 ip.user_ns["empty"] = {}
1085 ip.user_ns["d"] = {"abc": 5}
1186 ip.user_ns["d"] = {"abc": 5}
1086
1187
1087 _, matches = complete(line_buffer="no_getitem['")
1188 _, matches = complete(line_buffer="no_getitem['")
1088 _, matches = complete(line_buffer="no_keys['")
1189 _, matches = complete(line_buffer="no_keys['")
1089 _, matches = complete(line_buffer="cant_call_keys['")
1190 _, matches = complete(line_buffer="cant_call_keys['")
1090 _, matches = complete(line_buffer="empty['")
1191 _, matches = complete(line_buffer="empty['")
1091 _, matches = complete(line_buffer="name_error['")
1192 _, matches = complete(line_buffer="name_error['")
1092 _, matches = complete(line_buffer="d['\\") # incomplete escape
1193 _, matches = complete(line_buffer="d['\\") # incomplete escape
1093
1194
1094 def test_object_key_completion(self):
1195 def test_object_key_completion(self):
1095 ip = get_ipython()
1196 ip = get_ipython()
1096 ip.user_ns["key_completable"] = KeyCompletable(["qwerty", "qwick"])
1197 ip.user_ns["key_completable"] = KeyCompletable(["qwerty", "qwick"])
1097
1198
1098 _, matches = ip.Completer.complete(line_buffer="key_completable['qw")
1199 _, matches = ip.Completer.complete(line_buffer="key_completable['qw")
1099 nt.assert_in("qwerty", matches)
1200 nt.assert_in("qwerty", matches)
1100 nt.assert_in("qwick", matches)
1201 nt.assert_in("qwick", matches)
1101
1202
1102 def test_class_key_completion(self):
1203 def test_class_key_completion(self):
1103 ip = get_ipython()
1204 ip = get_ipython()
1104 NamedInstanceClass("qwerty")
1205 NamedInstanceClass("qwerty")
1105 NamedInstanceClass("qwick")
1206 NamedInstanceClass("qwick")
1106 ip.user_ns["named_instance_class"] = NamedInstanceClass
1207 ip.user_ns["named_instance_class"] = NamedInstanceClass
1107
1208
1108 _, matches = ip.Completer.complete(line_buffer="named_instance_class['qw")
1209 _, matches = ip.Completer.complete(line_buffer="named_instance_class['qw")
1109 nt.assert_in("qwerty", matches)
1210 nt.assert_in("qwerty", matches)
1110 nt.assert_in("qwick", matches)
1211 nt.assert_in("qwick", matches)
1111
1212
1112 def test_tryimport(self):
1213 def test_tryimport(self):
1113 """
1214 """
1114 Test that try-import don't crash on trailing dot, and import modules before
1215 Test that try-import don't crash on trailing dot, and import modules before
1115 """
1216 """
1116 from IPython.core.completerlib import try_import
1217 from IPython.core.completerlib import try_import
1117
1218
1118 assert try_import("IPython.")
1219 assert try_import("IPython.")
1119
1220
1120 def test_aimport_module_completer(self):
1221 def test_aimport_module_completer(self):
1121 ip = get_ipython()
1222 ip = get_ipython()
1122 _, matches = ip.complete("i", "%aimport i")
1223 _, matches = ip.complete("i", "%aimport i")
1123 nt.assert_in("io", matches)
1224 nt.assert_in("io", matches)
1124 nt.assert_not_in("int", matches)
1225 nt.assert_not_in("int", matches)
1125
1226
1126 def test_nested_import_module_completer(self):
1227 def test_nested_import_module_completer(self):
1127 ip = get_ipython()
1228 ip = get_ipython()
1128 _, matches = ip.complete(None, "import IPython.co", 17)
1229 _, matches = ip.complete(None, "import IPython.co", 17)
1129 nt.assert_in("IPython.core", matches)
1230 nt.assert_in("IPython.core", matches)
1130 nt.assert_not_in("import IPython.core", matches)
1231 nt.assert_not_in("import IPython.core", matches)
1131 nt.assert_not_in("IPython.display", matches)
1232 nt.assert_not_in("IPython.display", matches)
1132
1233
1133 def test_import_module_completer(self):
1234 def test_import_module_completer(self):
1134 ip = get_ipython()
1235 ip = get_ipython()
1135 _, matches = ip.complete("i", "import i")
1236 _, matches = ip.complete("i", "import i")
1136 nt.assert_in("io", matches)
1237 nt.assert_in("io", matches)
1137 nt.assert_not_in("int", matches)
1238 nt.assert_not_in("int", matches)
1138
1239
1139 def test_from_module_completer(self):
1240 def test_from_module_completer(self):
1140 ip = get_ipython()
1241 ip = get_ipython()
1141 _, matches = ip.complete("B", "from io import B", 16)
1242 _, matches = ip.complete("B", "from io import B", 16)
1142 nt.assert_in("BytesIO", matches)
1243 nt.assert_in("BytesIO", matches)
1143 nt.assert_not_in("BaseException", matches)
1244 nt.assert_not_in("BaseException", matches)
1144
1245
1145 def test_snake_case_completion(self):
1246 def test_snake_case_completion(self):
1146 ip = get_ipython()
1247 ip = get_ipython()
1147 ip.Completer.use_jedi = False
1248 ip.Completer.use_jedi = False
1148 ip.user_ns["some_three"] = 3
1249 ip.user_ns["some_three"] = 3
1149 ip.user_ns["some_four"] = 4
1250 ip.user_ns["some_four"] = 4
1150 _, matches = ip.complete("s_", "print(s_f")
1251 _, matches = ip.complete("s_", "print(s_f")
1151 nt.assert_in("some_three", matches)
1252 nt.assert_in("some_three", matches)
1152 nt.assert_in("some_four", matches)
1253 nt.assert_in("some_four", matches)
1153
1254
1154 def test_mix_terms(self):
1255 def test_mix_terms(self):
1155 ip = get_ipython()
1256 ip = get_ipython()
1156 from textwrap import dedent
1257 from textwrap import dedent
1157
1258
1158 ip.Completer.use_jedi = False
1259 ip.Completer.use_jedi = False
1159 ip.ex(
1260 ip.ex(
1160 dedent(
1261 dedent(
1161 """
1262 """
1162 class Test:
1263 class Test:
1163 def meth(self, meth_arg1):
1264 def meth(self, meth_arg1):
1164 print("meth")
1265 print("meth")
1165
1266
1166 def meth_1(self, meth1_arg1, meth1_arg2):
1267 def meth_1(self, meth1_arg1, meth1_arg2):
1167 print("meth1")
1268 print("meth1")
1168
1269
1169 def meth_2(self, meth2_arg1, meth2_arg2):
1270 def meth_2(self, meth2_arg1, meth2_arg2):
1170 print("meth2")
1271 print("meth2")
1171 test = Test()
1272 test = Test()
1172 """
1273 """
1173 )
1274 )
1174 )
1275 )
1175 _, matches = ip.complete(None, "test.meth(")
1276 _, matches = ip.complete(None, "test.meth(")
1176 nt.assert_in("meth_arg1=", matches)
1277 nt.assert_in("meth_arg1=", matches)
1177 nt.assert_not_in("meth2_arg1=", matches)
1278 nt.assert_not_in("meth2_arg1=", matches)