Support Bytes dict completions, and test it....
Matthias Bussonnier

# encoding: utf-8
"""Completion for IPython.

This module started as a fork of the rlcompleter module in the Python standard
library. The original enhancements made to rlcompleter have been sent
upstream and were accepted as of Python 2.3.

This module now supports a wide variety of completion mechanisms, both for
normal classic Python code and for IPython-specific syntax like magics.

Experimental
============

Starting with IPython 6.0, this module can make use of the Jedi library to
generate completions both using static analysis of the code, and dynamically
inspecting multiple namespaces. The APIs attached to this new mechanism are
unstable and will raise unless used in an :any:`provisionalcompleter` context
manager.

You will find that the following are experimental:

    - :any:`provisionalcompleter`
    - :any:`IPCompleter.completions`
    - :any:`Completion`
    - :any:`rectify_completions`

.. note::

    better name for :any:`rectify_completions` ?

We welcome any feedback on these new APIs, and we also encourage you to try this
module in debug mode (start IPython with ``--Completer.debug=True``) in order
to have extra logging information if :any:`jedi` is crashing, or if current
IPython completer pending deprecations are returning results not yet handled
by :any:`jedi`.

Using Jedi for tab completion allows snippets like the following to work without
having to execute any code:

    >>> myvar = ['hello', 42]
    ... myvar[1].bi<tab>

Tab completion will be able to infer that ``myvar[1]`` is an integer without
executing any code, unlike the previously available ``IPCompleter.greedy``
option.

Be sure to update :any:`jedi` to the latest stable version or to try the
current development version to get better completions.
"""

# skip module doctests
skip_doctest = True

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
#
# Some of this code originated from rlcompleter in the Python standard library
# Copyright (C) 2001 Python Software Foundation, www.python.org


import __main__
import builtins as builtin_mod
import glob
import time
import inspect
import itertools
import keyword
import os
import re
import sys
import unicodedata
import string
import warnings

from contextlib import contextmanager
from importlib import import_module
from typing import Iterator, List
from types import SimpleNamespace

from traitlets.config.configurable import Configurable
from IPython.core.error import TryNext
from IPython.core.inputsplitter import ESC_MAGIC
from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
from IPython.utils import generics
from IPython.utils.dir2 import dir2, get_real_method
from IPython.utils.process import arg_split
from IPython.utils.py3compat import cast_unicode_py2
from traitlets import Bool, Enum, observe, Int

try:
    import jedi
    import jedi.api.helpers
    JEDI_INSTALLED = True
except ImportError:
    JEDI_INSTALLED = False
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------

# Public API
__all__ = ['Completer','IPCompleter']

if sys.platform == 'win32':
    PROTECTABLES = ' '
else:
    PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'


_deprecation_readline_sentinel = object()


class ProvisionalCompleterWarning(FutureWarning):
    """
    Exception raised by an experimental feature in this module.

    Wrap code in the :any:`provisionalcompleter` context manager if you
    are certain you want to use an unstable feature.
    """
    pass

warnings.filterwarnings('error', category=ProvisionalCompleterWarning)

@contextmanager
def provisionalcompleter(action='ignore'):
    """
    This context manager has to be used in any place where unstable completer
    behavior and API may be called.

    >>> with provisionalcompleter():
    ...     completer.do_experimental_things()  # works

    >>> completer.do_experimental_things()  # raises.

    .. note:: Unstable

        By using this context manager you agree that the API in use may change
        without warning, and that you won't complain if it does so.

        You also understand that if the API is not to your liking you should
        report a bug upstream explaining your use case and help improve the
        API, and that you will lose credibility if you only complain after the
        API is made stable.

        We'll be happy to get your feedback, feature requests and improvements
        on any of the unstable APIs!
    """
    with warnings.catch_warnings():
        warnings.filterwarnings(action, category=ProvisionalCompleterWarning)
        yield


def has_open_quotes(s):
    """Return whether a string has open quotes.

    This simply counts whether the number of quote characters of either type in
    the string is odd.

    Returns
    -------
    If there is an open quote, the quote character is returned. Else, return
    False.
    """
    # We check " first, then ', so complex cases with nested quotes will get
    # the " to take precedence.
    if s.count('"') % 2:
        return '"'
    elif s.count("'") % 2:
        return "'"
    else:
        return False
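
# Illustrative behaviour of ``has_open_quotes`` (added examples, not in the
# original source):
#
#     >>> has_open_quotes('print("hello')
#     '"'
#     >>> has_open_quotes("it's")
#     "'"
#     >>> has_open_quotes('"closed"')
#     False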


def protect_filename(s):
    """Escape a string to protect certain characters."""
    if set(s) & set(PROTECTABLES):
        if sys.platform == "win32":
            return '"' + s + '"'
        else:
            return "".join(("\\" + c if c in PROTECTABLES else c) for c in s)
    else:
        return s


def expand_user(path):
    """Expand ``~``-style usernames in strings.

    This is similar to :func:`os.path.expanduser`, but it computes and returns
    extra information that will be useful if the input was being used in
    computing completions, and you wish to return the completions with the
    original '~' instead of its expanded value.

    Parameters
    ----------
    path : str
        String to be expanded. If no ~ is present, the output is the same as the
        input.

    Returns
    -------
    newpath : str
        Result of ~ expansion in the input path.
    tilde_expand : bool
        Whether any expansion was performed or not.
    tilde_val : str
        The value that ~ was replaced with.
    """
    # Default values
    tilde_expand = False
    tilde_val = ''
    newpath = path

    if path.startswith('~'):
        tilde_expand = True
        rest = len(path)-1
        newpath = os.path.expanduser(path)
        if rest:
            tilde_val = newpath[:-rest]
        else:
            tilde_val = newpath

    return newpath, tilde_expand, tilde_val


def compress_user(path, tilde_expand, tilde_val):
    """Does the opposite of expand_user, with its outputs.
    """
    if tilde_expand:
        return path.replace(tilde_val, '~')
    else:
        return path
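
# Illustrative round trip between ``expand_user`` and ``compress_user`` (added
# example; the expanded path shown assumes a hypothetical home of /home/user):
#
#     >>> newpath, tilde_expand, tilde_val = expand_user('~/rep')
#     >>> (newpath, tilde_expand, tilde_val)
#     ('/home/user/rep', True, '/home/user')
#     >>> compress_user('/home/user/repo.py', tilde_expand, tilde_val)
#     '~/repo.py'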


def completions_sorting_key(word):
    """key for sorting completions

    This does several things:

    - Lowercase all completions, so they are sorted alphabetically with
      upper and lower case words mingled
    - Demote any completions starting with underscores to the end
    - Insert any %magic and %%cellmagic completions in the alphabetical order
      by their name
    """
    # Case insensitive sort
    word = word.lower()

    prio1, prio2 = 0, 0

    if word.startswith('__'):
        prio1 = 2
    elif word.startswith('_'):
        prio1 = 1

    if word.endswith('='):
        prio1 = -1

    if word.startswith('%%'):
        # If there's another % in there, this is something else, so leave it alone
        if not "%" in word[2:]:
            word = word[2:]
            prio2 = 2
    elif word.startswith('%'):
        if not "%" in word[1:]:
            word = word[1:]
            prio2 = 1

    return prio1, word, prio2
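
# Illustrative effect of the sorting key (added example):
#
#     >>> sorted(['_private', 'Zebra', 'apple', '%%timeit', '__dunder'],
#     ...        key=completions_sorting_key)
#     ['apple', '%%timeit', 'Zebra', '_private', '__dunder']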


class _FakeJediCompletion:
    """
    This is a workaround to communicate to the UI that Jedi has crashed and to
    report a bug. Will be used only if :any:`IPCompleter.debug` is set to true.

    Added in IPython 6.0 so should likely be removed for 7.0

    """

    def __init__(self, name):

        self.name = name
        self.complete = name
        self.type = 'crashed'
        self.name_with_symbols = name

    def __repr__(self):
        return '<Fake completion object jedi has crashed>'


class Completion:
    """
    Completion object used and returned by IPython completers.

    .. warning:: Unstable

        This class is unstable, the API may change without warning.
        It will also raise unless used in the proper context manager.

    This acts as a middle-ground :any:`Completion` object between the
    :any:`jedi.api.classes.Completion` object and the Prompt Toolkit completion
    object. While Jedi needs a lot of information about the evaluator and how the
    code should be run/inspected, Prompt Toolkit (and other frontends) mostly
    need user-facing information:

    - Which range should be replaced by what.
    - Some metadata (like completion type), or meta information to display to
      the user.

    For debugging purposes we can also store the origin of the completion (``jedi``,
    ``IPython.python_matches``, ``IPython.magics_matches``...).
    """

    def __init__(self, start: int, end: int, text: str, *, type: str=None, _origin=''):
        warnings.warn("``Completion`` is a provisional API (as of IPython 6.0). "
                      "It may change without warnings. "
                      "Use in corresponding context manager.",
                      category=ProvisionalCompleterWarning, stacklevel=2)

        self.start = start
        self.end = end
        self.text = text
        self.type = type
        self._origin = _origin

    def __repr__(self):
        return '<Completion start=%s end=%s text=%r type=%r>' % (self.start, self.end, self.text, self.type or '?')

    def __eq__(self, other) -> bool:
        """
        Equality and hash do not hash the type (as some completers may not be
        able to infer the type), but are used to (partially) de-duplicate
        completions.

        Completely de-duplicating completions is a bit trickier than just
        comparing, as it depends on surrounding text, which Completions are not
        aware of.
        """
        return self.start == other.start and \
            self.end == other.end and \
            self.text == other.text

    def __hash__(self):
        return hash((self.start, self.end, self.text))

_IC = Iterator[Completion]

def rectify_completions(text:str, completion:_IC, *, _debug=False)->_IC:
    """
    Rectify a set of completions to all have the same ``start`` and ``end``.

    .. warning:: Unstable

        This function is unstable, the API may change without warning.
        It will also raise unless used in the proper context manager.

    Parameters
    ----------
    text: str
        text that should be completed.
    completion: Iterator[Completion]
        iterator over the completions to rectify


    :any:`jedi.api.classes.Completion` s returned by Jedi may not have the same
    start and end, though the Jupyter Protocol requires them to behave like so.
    This will readjust the completions to have the same ``start`` and ``end`` by
    padding both extremities with surrounding text.

    During stabilisation this should support a ``_debug`` option to log which
    completions are returned by the IPython completer but not found by Jedi, in
    order to make upstream bug reports.
    """
    warnings.warn("`rectify_completions` is a provisional API (as of IPython 6.0). "
                  "It may change without warnings. "
                  "Use in corresponding context manager.",
                  category=ProvisionalCompleterWarning, stacklevel=2)

    completions = list(completion)
    if not completions:
        return
    starts = (c.start for c in completions)
    ends = (c.end for c in completions)

    new_start = min(starts)
    new_end = max(ends)

    seen_jedi = set()
    seen_python_matches = set()
    for c in completions:
        new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
        if c._origin == 'jedi':
            seen_jedi.add(new_text)
        elif c._origin == 'IPCompleter.python_matches':
            seen_python_matches.add(new_text)
        yield Completion(new_start, new_end, new_text, type=c.type, _origin=c._origin)
    diff = seen_python_matches.difference(seen_jedi)
    if diff and _debug:
        print('IPython.python matches have extras:', diff)
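
# Illustrative example of the rectification described above (added example;
# run it inside ``provisionalcompleter()`` since ``Completion`` and
# ``rectify_completions`` are provisional):
#
#     with provisionalcompleter():
#         cs = [Completion(start=4, end=6, text='bar'),
#               Completion(start=0, end=6, text='foo.bar')]
#         rectified = list(rectify_completions('foo.ba', cs))
#     # both results now span 0..6 with text 'foo.bar'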


if sys.platform == 'win32':
    DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
else:
    DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'

GREEDY_DELIMS = ' =\r\n'


class CompletionSplitter(object):
    """An object to split an input line in a manner similar to readline.

    By having our own implementation, we can expose readline-like completion in
    a uniform manner to all frontends. This object only needs to be given the
    line of text to be split and the cursor position on said line, and it
    returns the 'word' to be completed on at the cursor after splitting the
    entire line.

    What characters are used as splitting delimiters can be controlled by
    setting the ``delims`` attribute (this is a property that internally
    automatically builds the necessary regular expression)"""

    # Private interface

    # A string of delimiter characters. The default value makes sense for
    # IPython's most typical usage patterns.
    _delims = DELIMS

    # The expression (a normal string) to be compiled into a regular expression
    # for actual splitting. We store it as an attribute mostly for ease of
    # debugging, since this type of code can be so tricky to debug.
    _delim_expr = None

    # The regular expression that does the actual splitting
    _delim_re = None

    def __init__(self, delims=None):
        delims = CompletionSplitter._delims if delims is None else delims
        self.delims = delims

    @property
    def delims(self):
        """Return the string of delimiter characters."""
        return self._delims

    @delims.setter
    def delims(self, delims):
        """Set the delimiters for line splitting."""
        expr = '[' + ''.join('\\'+ c for c in delims) + ']'
        self._delim_re = re.compile(expr)
        self._delims = delims
        self._delim_expr = expr

    def split_line(self, line, cursor_pos=None):
        """Split a line of text with a cursor at the given position.
        """
        l = line if cursor_pos is None else line[:cursor_pos]
        return self._delim_re.split(l)[-1]
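
# Illustrative behaviour of the splitter (added examples):
#
#     >>> CompletionSplitter().split_line('print(foo.ba')
#     'foo.ba'
#     >>> CompletionSplitter().split_line('run foo bar', cursor_pos=7)
#     'foo'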



class Completer(Configurable):

    greedy = Bool(False,
        help="""Activate greedy completion
        PENDING DEPRECATION. This is now mostly taken care of with Jedi.

        This will enable completion on elements of lists, results of function calls, etc.,
        but can be unsafe because the code is actually evaluated on TAB.
        """
    ).tag(config=True)

    use_jedi = Bool(default_value=JEDI_INSTALLED,
        help="Experimental: Use Jedi to generate autocompletions. "
             "Defaults to True if jedi is installed.").tag(config=True)

    jedi_compute_type_timeout = Int(default_value=400,
        help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
        Set to 0 to stop computing types. A non-zero value lower than 100ms may hurt
        performance by preventing jedi from building its cache.
        """).tag(config=True)

    debug = Bool(default_value=False,
        help='Enable debug for the Completer. Mostly print extra '
             'information for experimental jedi integration.')\
        .tag(config=True)
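
    # Illustrative configuration of the options above (added example). From
    # the command line: ``ipython --Completer.debug=True``; or, in a profile's
    # ``ipython_config.py``, assuming the usual ``c`` configuration object:
    #
    #     c.Completer.use_jedi = False
    #     c.Completer.jedi_compute_type_timeout = 200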


    def __init__(self, namespace=None, global_namespace=None, **kwargs):
        """Create a new completer for the command line.

        Completer(namespace=ns, global_namespace=ns2) -> completer instance.

        If unspecified, the default namespace where completions are performed
        is __main__ (technically, __main__.__dict__). Namespaces should be
        given as dictionaries.

        An optional second namespace can be given. This allows the completer
        to handle cases where both the local and global scopes need to be
        distinguished.
        """

        # Don't bind to namespace quite yet, but flag whether the user wants a
        # specific namespace or to use __main__.__dict__. This will allow us
        # to bind to __main__.__dict__ at completion time, not now.
        if namespace is None:
            self.use_main_ns = True
        else:
            self.use_main_ns = False
            self.namespace = namespace

        # The global namespace, if given, can be bound directly
        if global_namespace is None:
            self.global_namespace = {}
        else:
            self.global_namespace = global_namespace

        super(Completer, self).__init__(**kwargs)

    def complete(self, text, state):
        """Return the next possible completion for 'text'.

        This is called successively with state == 0, 1, 2, ... until it
        returns None. The completion should begin with 'text'.

        """
        if self.use_main_ns:
            self.namespace = __main__.__dict__

        if state == 0:
            if "." in text:
                self.matches = self.attr_matches(text)
            else:
                self.matches = self.global_matches(text)
        try:
            return self.matches[state]
        except IndexError:
            return None
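
    # Illustrative use of the readline-style state protocol described above
    # (added example; match order follows the lists of names searched):
    #
    #     >>> c = Completer(namespace={'spam': 1, 'spawn': 2})
    #     >>> c.complete('spa', 0)
    #     'spam'
    #     >>> c.complete('spa', 1)
    #     'spawn'
    #     >>> c.complete('spa', 2)  # returns None: no more matches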

    def global_matches(self, text):
        """Compute matches when text is a simple name.

        Return a list of all keywords, built-in functions and names currently
        defined in self.namespace or self.global_namespace that match.

        """
        matches = []
        match_append = matches.append
        n = len(text)
        for lst in [keyword.kwlist,
                    builtin_mod.__dict__.keys(),
                    self.namespace.keys(),
                    self.global_namespace.keys()]:
            for word in lst:
                if word[:n] == text and word != "__builtins__":
                    match_append(word)
        return [cast_unicode_py2(m) for m in matches]

    def attr_matches(self, text):
        """Compute matches when text contains a dot.

        Assuming the text is of the form NAME.NAME....[NAME], and is
        evaluatable in self.namespace or self.global_namespace, it will be
        evaluated and its attributes (as revealed by dir()) are used as
        possible completions. (For class instances, class members are
        also considered.)

        WARNING: this can still invoke arbitrary C code, if an object
        with a __getattr__ hook is evaluated.

        """

        # Another option, seems to work great. Catches things like ''.<tab>
        m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)

        if m:
            expr, attr = m.group(1, 3)
        elif self.greedy:
            m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
            if not m2:
                return []
            expr, attr = m2.group(1,2)
        else:
            return []

        try:
            obj = eval(expr, self.namespace)
        except:
            try:
                obj = eval(expr, self.global_namespace)
            except:
                return []

        if self.limit_to__all__ and hasattr(obj, '__all__'):
            words = get__all__entries(obj)
        else:
            words = dir2(obj)

        try:
            words = generics.complete_object(obj, words)
        except TryNext:
            pass
        except AssertionError:
            raise
        except Exception:
            # Silence errors from completion function
            #raise # dbg
            pass
        # Build match list to return
        n = len(attr)
        return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr ]


def get__all__entries(obj):
    """returns the strings in the __all__ attribute"""
    try:
        words = getattr(obj, '__all__')
    except:
        return []

    return [cast_unicode_py2(w) for w in words if isinstance(w, str)]


def match_dict_keys(keys: List[str], prefix: str, delims: str):
    """Used by dict_key_matches, matching the prefix to a list of keys

    Parameters
    ==========
    keys:
        list of keys in dictionary currently being completed.
    prefix:
        Part of the text already typed by the user, e.g. `mydict[b'fo`
    delims:
        String of delimiters to consider when finding the current key.

    Returns
    =======

    A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
    ``quote`` being the quote that needs to be used to close the current string,
    ``token_start`` the position where the replacement should start occurring,
    and ``matched`` a list of replacements/completions.

    """
    if not prefix:
        return None, 0, [repr(k) for k in keys
                         if isinstance(k, (str, bytes))]
    quote_match = re.search('["\']', prefix)
    quote = quote_match.group()
    try:
        prefix_str = eval(prefix + quote, {})
    except Exception:
        return None, 0, []

    pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
    token_match = re.search(pattern, prefix, re.UNICODE)
    token_start = token_match.start()
    token_prefix = token_match.group()

    matched = []
    for key in keys:
        try:
            if not key.startswith(prefix_str):
                continue
        except (AttributeError, TypeError, UnicodeError):
            # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
            continue

        # reformat remainder of key to begin with prefix
        rem = key[len(prefix_str):]
        # force repr wrapped in '
        rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
        if rem_repr.startswith('u') and prefix[0] not in 'uU':
            # Found key is unicode, but prefix is Py2 string.
            # Therefore attempt to interpret key as string.
            try:
                rem_repr = repr(rem.encode('ascii') + '"')
            except UnicodeEncodeError:
                continue

        rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
        if quote == '"':
            # The entered prefix is quoted with ",
            # but the match is quoted with '.
            # A contained " hence needs escaping for comparison:
            rem_repr = rem_repr.replace('"', '\\"')

        # then reinsert prefix from start of token
        matched.append('%s%s' % (token_prefix, rem_repr))
    return quote, token_start, matched
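
# Illustrative behaviour, including the bytes-key support this change adds
# (added example; ``DELIMS`` is the module-level delimiter string defined above):
#
#     >>> match_dict_keys([b'abc', b'abd', 'abe'], "b'a", DELIMS)
#     ("'", 2, ['abc', 'abd'])
#     >>> match_dict_keys(['abc', 'abd'], '"a', DELIMS)
#     ('"', 1, ['abc', 'abd'])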


def cursor_to_position(text:str, line:int, column:int)->int:
    """
    Convert the (line, column) position of the cursor in text to an offset in a
    string.

    Parameters
    ----------

    text : str
        The text in which to calculate the cursor offset
    line : int
        Line of the cursor; 0-indexed
    column : int
        Column of the cursor 0-indexed

    Returns
    -------
    Position of the cursor in ``text``, 0-indexed.

    See Also
    --------
    position_to_cursor: reciprocal of this function

    """
    lines = text.split('\n')
    assert line <= len(lines), '{} <= {}'.format(str(line), str(len(lines)))

    return sum(len(l) + 1 for l in lines[:line]) + column

def position_to_cursor(text:str, offset:int)->(int, int):
    """
    Convert the position of the cursor in text (0 indexed) to a line
    number (0-indexed) and a column number (0-indexed) pair.

    Position should be a valid position in ``text``.

    Parameters
    ----------

    text : str
        The text in which to calculate the cursor offset
    offset : int
        Position of the cursor in ``text``, 0-indexed.

    Returns
    -------
    (line, column) : (int, int)
        Line of the cursor; 0-indexed, column of the cursor 0-indexed


    See Also
    --------
    cursor_to_position : reciprocal of this function


    """

    assert 0 < offset <= len(text) , "0 < %s <= %s" % (offset , len(text))

    before = text[:offset]
    blines = before.split('\n')  # ! splitlines trims a trailing \n
    line = before.count('\n')
    col = len(blines[-1])
    return line, col
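
# Illustrative round trip between the two helpers above (added example):
#
#     >>> text = 'hello\nworld'
#     >>> cursor_to_position(text, 1, 2)   # line 1, column 2 -> the 'r' of 'world'
#     8
#     >>> position_to_cursor(text, 8)
#     (1, 2)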


def _safe_isinstance(obj, module, class_name):
    """Checks if obj is an instance of module.class_name if loaded
    """
    return (module in sys.modules and
            isinstance(obj, getattr(import_module(module), class_name)))


def back_unicode_name_matches(text):
    u"""Match unicode characters back to unicode name

    This does ``☃`` -> ``\\snowman``

    Note that snowman is not a valid python3 combining character but will be expanded.
    It will not, though, be recombined back to the snowman character by the completion
    machinery.

    This will also not back-complete standard sequences like \\n, \\b ...

    Used on Python 3 only.
    """
    if len(text)<2:
        return u'', ()
    maybe_slash = text[-2]
    if maybe_slash != '\\':
        return u'', ()

    char = text[-1]
    # no expand on quote for completion in strings.
    # nor backcomplete standard ascii keys
    if char in string.ascii_letters or char in ['"',"'"]:
        return u'', ()
    try:
        unic = unicodedata.name(char)
        return '\\'+char,['\\'+unic]
    except KeyError:
        pass
    return u'', ()

def back_latex_name_matches(text:str):
    """Match latex characters back to unicode name

    This does ``\\ℵ`` -> ``\\aleph``

    Used on Python 3 only.
    """
    if len(text)<2:
        return u'', ()
    maybe_slash = text[-2]
    if maybe_slash != '\\':
        return u'', ()


    char = text[-1]
    # no expand on quote for completion in strings.
    # nor backcomplete standard ascii keys
    if char in string.ascii_letters or char in ['"',"'"]:
        return u'', ()
    try:
        latex = reverse_latex_symbol[char]
        # '\\' replace the \ as well
        return '\\'+char,[latex]
    except KeyError:
        pass
    return u'', ()
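
# Illustrative behaviour of the two back-completion helpers above (added
# example; the exact names come from unicodedata and the latex_symbols table):
#
#     >>> back_unicode_name_matches('print("\\☃')
#     ('\\☃', ['\\SNOWMAN'])
#     >>> back_latex_name_matches('x = \\ℵ')
#     ('\\ℵ', ['\\aleph'])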


class IPCompleter(Completer):
    """Extension of the completer class with IPython-specific features"""

    @observe('greedy')
    def _greedy_changed(self, change):
        """update the splitter and readline delims when greedy is changed"""
        if change['new']:
            self.splitter.delims = GREEDY_DELIMS
        else:
            self.splitter.delims = DELIMS

    merge_completions = Bool(True,
        help="""Whether to merge completion results into a single list

        If False, only the completion results from the first non-empty
        completer will be returned.
        """
    ).tag(config=True)
    omit__names = Enum((0,1,2), default_value=2,
        help="""Instruct the completer to omit private method names

        Specifically, when completing on ``object.<tab>``.

        When 2 [default]: all names that start with '_' will be excluded.

        When 1: all 'magic' names (``__foo__``) will be excluded.

        When 0: nothing will be excluded.
        """
    ).tag(config=True)
    limit_to__all__ = Bool(False,
        help="""
        DEPRECATED as of version 5.0.

        Instruct the completer to use __all__ for the completion

        Specifically, when completing on ``object.<tab>``.

        When True: only those names in obj.__all__ will be included.

        When False [default]: the __all__ attribute is ignored
        """,
    ).tag(config=True)

    @observe('limit_to__all__')
    def _limit_to_all_changed(self, change):
        warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
            'value has been deprecated since IPython 5.0, will be made to have '
            'no effect and will be removed in a future version of IPython.',
            UserWarning)

    def __init__(self, shell=None, namespace=None, global_namespace=None,
                 use_readline=_deprecation_readline_sentinel, config=None, **kwargs):
        """IPCompleter() -> completer

        Return a completer object.

        Parameters
        ----------

        shell
            a pointer to the ipython shell itself. This is needed
            because this completer knows about magic functions, and those can
            only be accessed via the ipython instance.

        namespace : dict, optional
            an optional dict where completions are performed.

        global_namespace : dict, optional
            secondary optional dict for completions, to
            handle cases (such as IPython embedded inside functions) where
            both Python scopes are visible.

        use_readline : bool, optional
            DEPRECATED, ignored since IPython 6.0, will have no effect.
        """

        self.magic_escape = ESC_MAGIC
        self.splitter = CompletionSplitter()

        if use_readline is not _deprecation_readline_sentinel:
            warnings.warn('The `use_readline` parameter is deprecated and ignored since IPython 6.0.',
                          DeprecationWarning, stacklevel=2)

        # _greedy_changed() depends on splitter and readline being defined:
        Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
                           config=config, **kwargs)

        # List where completion matches will be stored
        self.matches = []
        self.shell = shell
        # Regexp to split filenames with spaces in them
        self.space_name_re = re.compile(r'([^\\] )')
        # Hold a local ref. to glob.glob for speed
        self.glob = glob.glob

        # Determine if we are running on 'dumb' terminals, like (X)Emacs
        # buffers, to avoid completion problems.
        term = os.environ.get('TERM','xterm')
        self.dumb_terminal = term in ['dumb','emacs']

        # Special handling of backslashes needed in win32 platforms
        if sys.platform == "win32":
            self.clean_glob = self._clean_glob_win32
        else:
            self.clean_glob = self._clean_glob

        # regexp to parse docstring for function signature
        self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
        self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
        # use this if positional argument name is also needed
        # = re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')

        # All active matcher routines for completion
        self.matchers = [
            self.python_matches,
            self.file_matches,
            self.magic_matches,
            self.python_func_kw_matches,
            self.dict_key_matches,
        ]

        # This is set externally by InteractiveShell
        self.custom_completers = None
932
950
933 def all_completions(self, text):
951 def all_completions(self, text):
934 """
952 """
935 Wrapper around the complete method for the benefit of emacs.
953 Wrapper around the complete method for the benefit of emacs.
936 """
954 """
937 return self.complete(text)[1]
955 return self.complete(text)[1]
938
956
939 def _clean_glob(self, text):
957 def _clean_glob(self, text):
940 return self.glob("%s*" % text)
958 return self.glob("%s*" % text)
941
959
942 def _clean_glob_win32(self,text):
960 def _clean_glob_win32(self,text):
943 return [f.replace("\\","/")
961 return [f.replace("\\","/")
944 for f in self.glob("%s*" % text)]
962 for f in self.glob("%s*" % text)]
945
963
946 def file_matches(self, text):
964 def file_matches(self, text):
947 """Match filenames, expanding ~USER type strings.
965 """Match filenames, expanding ~USER type strings.
948
966
949 Most of the seemingly convoluted logic in this completer is an
967 Most of the seemingly convoluted logic in this completer is an
950 attempt to handle filenames with spaces in them. And yet it's not
968 attempt to handle filenames with spaces in them. And yet it's not
951 quite perfect, because Python's readline doesn't expose all of the
969 quite perfect, because Python's readline doesn't expose all of the
952 GNU readline details needed for this to be done correctly.
970 GNU readline details needed for this to be done correctly.
953
971
954 For a filename with a space in it, the printed completions will be
972 For a filename with a space in it, the printed completions will be
955 only the parts after what's already been typed (instead of the
973 only the parts after what's already been typed (instead of the
956 full completions, as is normally done). I don't think with the
974 full completions, as is normally done). I don't think with the
957 current (as of Python 2.3) Python readline it's possible to do
975 current (as of Python 2.3) Python readline it's possible to do
958 better."""
976 better."""
959
977
960 # chars that require escaping with backslash - i.e. chars
978 # chars that require escaping with backslash - i.e. chars
961 # that readline treats incorrectly as delimiters, but we
979 # that readline treats incorrectly as delimiters, but we
962 # don't want to treat as delimiters in filename matching
980 # don't want to treat as delimiters in filename matching
963 # when escaped with backslash
981 # when escaped with backslash
964 if text.startswith('!'):
982 if text.startswith('!'):
965 text = text[1:]
983 text = text[1:]
966 text_prefix = u'!'
984 text_prefix = u'!'
967 else:
985 else:
968 text_prefix = u''
986 text_prefix = u''
969
987
970 text_until_cursor = self.text_until_cursor
988 text_until_cursor = self.text_until_cursor
971 # track strings with open quotes
989 # track strings with open quotes
972 open_quotes = has_open_quotes(text_until_cursor)
990 open_quotes = has_open_quotes(text_until_cursor)
973
991
974 if '(' in text_until_cursor or '[' in text_until_cursor:
992 if '(' in text_until_cursor or '[' in text_until_cursor:
975 lsplit = text
993 lsplit = text
976 else:
994 else:
977 try:
995 try:
978 # arg_split ~ shlex.split, but with unicode bugs fixed by us
996 # arg_split ~ shlex.split, but with unicode bugs fixed by us
979 lsplit = arg_split(text_until_cursor)[-1]
997 lsplit = arg_split(text_until_cursor)[-1]
980 except ValueError:
998 except ValueError:
981 # typically an unmatched ", or backslash without escaped char.
999 # typically an unmatched ", or backslash without escaped char.
982 if open_quotes:
1000 if open_quotes:
983 lsplit = text_until_cursor.split(open_quotes)[-1]
1001 lsplit = text_until_cursor.split(open_quotes)[-1]
984 else:
1002 else:
985 return []
1003 return []
986 except IndexError:
1004 except IndexError:
987 # tab pressed on empty line
1005 # tab pressed on empty line
988 lsplit = ""
1006 lsplit = ""
989
1007
990 if not open_quotes and lsplit != protect_filename(lsplit):
1008 if not open_quotes and lsplit != protect_filename(lsplit):
991 # if protectables are found, do matching on the whole escaped name
1009 # if protectables are found, do matching on the whole escaped name
992 has_protectables = True
1010 has_protectables = True
993 text0,text = text,lsplit
1011 text0,text = text,lsplit
994 else:
1012 else:
995 has_protectables = False
1013 has_protectables = False
996 text = os.path.expanduser(text)
1014 text = os.path.expanduser(text)
997
1015
998 if text == "":
1016 if text == "":
999 return [text_prefix + cast_unicode_py2(protect_filename(f)) for f in self.glob("*")]
1017 return [text_prefix + cast_unicode_py2(protect_filename(f)) for f in self.glob("*")]
1000
1018
1001 # Compute the matches from the filesystem
1019 # Compute the matches from the filesystem
1002 if sys.platform == 'win32':
1020 if sys.platform == 'win32':
1003 m0 = self.clean_glob(text)
1021 m0 = self.clean_glob(text)
1004 else:
1022 else:
1005 m0 = self.clean_glob(text.replace('\\', ''))
1023 m0 = self.clean_glob(text.replace('\\', ''))
1006
1024
1007 if has_protectables:
1025 if has_protectables:
1008 # If we had protectables, we need to revert our changes to the
1026 # If we had protectables, we need to revert our changes to the
1009 # beginning of filename so that we don't double-write the part
1027 # beginning of filename so that we don't double-write the part
1010 # of the filename we have so far
1028 # of the filename we have so far
1011 len_lsplit = len(lsplit)
1029 len_lsplit = len(lsplit)
1012 matches = [text_prefix + text0 +
1030 matches = [text_prefix + text0 +
1013 protect_filename(f[len_lsplit:]) for f in m0]
1031 protect_filename(f[len_lsplit:]) for f in m0]
1014 else:
1032 else:
1015 if open_quotes:
1033 if open_quotes:
1016 # if we have a string with an open quote, we don't need to
1034 # if we have a string with an open quote, we don't need to
1017 # protect the names at all (and we _shouldn't_, as it
1035 # protect the names at all (and we _shouldn't_, as it
1018 # would cause bugs when the filesystem call is made).
1036 # would cause bugs when the filesystem call is made).
1019 matches = m0
1037 matches = m0
1020 else:
1038 else:
1021 matches = [text_prefix +
1039 matches = [text_prefix +
1022 protect_filename(f) for f in m0]
1040 protect_filename(f) for f in m0]
1023
1041
1024 # Mark directories in input list by appending '/' to their names.
1042 # Mark directories in input list by appending '/' to their names.
1025 return [cast_unicode_py2(x+'/') if os.path.isdir(x) else x for x in matches]
1043 return [cast_unicode_py2(x+'/') if os.path.isdir(x) else x for x in matches]
1026
1044
1027 def magic_matches(self, text):
1045 def magic_matches(self, text):
1028 """Match magics"""
1046 """Match magics"""
1029 # Get all shell magics now rather than statically, so magics loaded at
1047 # Get all shell magics now rather than statically, so magics loaded at
1030 # runtime show up too.
1048 # runtime show up too.
1031 lsm = self.shell.magics_manager.lsmagic()
1049 lsm = self.shell.magics_manager.lsmagic()
1032 line_magics = lsm['line']
1050 line_magics = lsm['line']
1033 cell_magics = lsm['cell']
1051 cell_magics = lsm['cell']
1034 pre = self.magic_escape
1052 pre = self.magic_escape
1035 pre2 = pre+pre
1053 pre2 = pre+pre
1036
1054
1037 # Completion logic:
1055 # Completion logic:
1038 # - user gives %%: only do cell magics
1056 # - user gives %%: only do cell magics
1039 # - user gives %: do both line and cell magics
1057 # - user gives %: do both line and cell magics
1040 # - no prefix: do both
1058 # - no prefix: do both
1041 # In other words, line magics are skipped if the user gives %% explicitly
1059 # In other words, line magics are skipped if the user gives %% explicitly
1042 bare_text = text.lstrip(pre)
1060 bare_text = text.lstrip(pre)
1043 comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
1061 comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
1044 if not text.startswith(pre2):
1062 if not text.startswith(pre2):
1045 comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
1063 comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
1046 return [cast_unicode_py2(c) for c in comp]
1064 return [cast_unicode_py2(c) for c in comp]
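# A minimal, standalone sketch of the prefix logic described above: cell
# magics always match, while line magics are skipped only when the user has
# already typed '%%'. The magic names below are made up for illustration.
def sketch_magic_matches(text, line_magics=('time', 'timeit'),
                         cell_magics=('timeit', 'writefile'), escape='%'):
    pre2 = escape * 2
    bare = text.lstrip(escape)
    comp = [pre2 + m for m in cell_magics if m.startswith(bare)]
    if not text.startswith(pre2):
        comp += [escape + m for m in line_magics if m.startswith(bare)]
    return comp

assert sketch_magic_matches('%%ti') == ['%%timeit']
assert sketch_magic_matches('%ti') == ['%%timeit', '%time', '%timeit']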
1047
1065
1048 def _jedi_matches(self, cursor_column:int, cursor_line:int, text:str):
1066 def _jedi_matches(self, cursor_column:int, cursor_line:int, text:str):
1049 """
1067 """
1050
1068
1051 Return a list of :any:`jedi.api.Completions` objects from a ``text`` and
1069 Return a list of :any:`jedi.api.Completions` objects from a ``text`` and
1052 cursor position.
1070 cursor position.
1053
1071
1054 Parameters
1072 Parameters
1055 ----------
1073 ----------
1056 cursor_column : int
1074 cursor_column : int
1057 column position of the cursor in ``text``, 0-indexed.
1075 column position of the cursor in ``text``, 0-indexed.
1058 cursor_line : int
1076 cursor_line : int
1059 line position of the cursor in ``text``, 0-indexed
1077 line position of the cursor in ``text``, 0-indexed
1060 text : str
1078 text : str
1061 text to complete
1079 text to complete
1062
1080
1063 Debugging
1081 Debugging
1064 ---------
1082 ---------
1065
1083
1066 If ``IPCompleter.debug`` is ``True`` may return a :any:`_FakeJediCompletion`
1084 If ``IPCompleter.debug`` is ``True`` may return a :any:`_FakeJediCompletion`
1067 object containing a string with the Jedi debug information attached.
1085 object containing a string with the Jedi debug information attached.
1068 """
1086 """
1069 namespaces = [self.namespace]
1087 namespaces = [self.namespace]
1070 if self.global_namespace is not None:
1088 if self.global_namespace is not None:
1071 namespaces.append(self.global_namespace)
1089 namespaces.append(self.global_namespace)
1072
1090
1073 # cursor_pos is an int, jedi wants line and column
1091 # cursor_pos is an int, jedi wants line and column
1074 offset = cursor_to_position(text, cursor_line, cursor_column)
1092 offset = cursor_to_position(text, cursor_line, cursor_column)
1075 if offset:
1093 if offset:
1076 pre = text[offset-1]
1094 pre = text[offset-1]
1077 completion_filter = lambda x:x
1095 completion_filter = lambda x:x
1078 if pre == '.':
1096 if pre == '.':
1079 if self.omit__names == 2:
1097 if self.omit__names == 2:
1080 completion_filter = lambda c:not c.name.startswith('_')
1098 completion_filter = lambda c:not c.name.startswith('_')
1081 elif self.omit__names == 1:
1099 elif self.omit__names == 1:
1082 completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
1100 completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
1083 elif self.omit__names == 0:
1101 elif self.omit__names == 0:
1084 completion_filter = lambda x:x
1102 completion_filter = lambda x:x
1085 else:
1103 else:
1086 raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
1104 raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
1087
1105
1088 interpreter = jedi.Interpreter(
1106 interpreter = jedi.Interpreter(
1089 text, namespaces, column=cursor_column, line=cursor_line + 1)
1107 text, namespaces, column=cursor_column, line=cursor_line + 1)
1090 try:
1108 try:
1091 return filter(completion_filter, interpreter.completions())
1109 return filter(completion_filter, interpreter.completions())
1092 except Exception as e:
1110 except Exception as e:
1093 if self.debug:
1111 if self.debug:
1094 return [_FakeJediCompletion('Oops, Jedi has crashed, please report a bug with the following:\n"""\n%s\n"""' % (e))]
1112 return [_FakeJediCompletion('Oops, Jedi has crashed, please report a bug with the following:\n"""\n%s\n"""' % (e))]
1095 else:
1113 else:
1096 return []
1114 return []
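# Standalone sketch of the `omit__names` filtering applied above to Jedi's
# results; `FakeCompletion` merely stands in for jedi's completion objects.
class FakeCompletion:
    def __init__(self, name):
        self.name = name

def omit_filter(level):
    if level == 2:
        return lambda c: not c.name.startswith('_')
    if level == 1:
        return lambda c: not (c.name.startswith('__') and c.name.endswith('__'))
    return lambda c: True

candidates = [FakeCompletion(n) for n in ('real', '_secret', '__init__')]
# level 2 hides every underscore name, level 1 hides only dunder names
assert [c.name for c in filter(omit_filter(2), candidates)] == ['real']
assert [c.name for c in filter(omit_filter(1), candidates)] == ['real', '_secret']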
1097
1115
1098 def python_matches(self, text):
1116 def python_matches(self, text):
1099 """Match attributes or global python names"""
1117 """Match attributes or global python names"""
1100 if "." in text:
1118 if "." in text:
1101 try:
1119 try:
1102 matches = self.attr_matches(text)
1120 matches = self.attr_matches(text)
1103 if text.endswith('.') and self.omit__names:
1121 if text.endswith('.') and self.omit__names:
1104 if self.omit__names == 1:
1122 if self.omit__names == 1:
1105 # true if txt is _not_ a __ name, false otherwise:
1123 # true if txt is _not_ a __ name, false otherwise:
1106 no__name = (lambda txt:
1124 no__name = (lambda txt:
1107 re.match(r'.*\.__.*?__',txt) is None)
1125 re.match(r'.*\.__.*?__',txt) is None)
1108 else:
1126 else:
1109 # true if txt is _not_ a _ name, false otherwise:
1127 # true if txt is _not_ a _ name, false otherwise:
1110 no__name = (lambda txt:
1128 no__name = (lambda txt:
1111 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
1129 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
1112 matches = filter(no__name, matches)
1130 matches = filter(no__name, matches)
1113 except NameError:
1131 except NameError:
1114 # catches <undefined attributes>.<tab>
1132 # catches <undefined attributes>.<tab>
1115 matches = []
1133 matches = []
1116 else:
1134 else:
1117 matches = self.global_matches(text)
1135 matches = self.global_matches(text)
1118 return matches
1136 return matches
1119
1137
1120 def _default_arguments_from_docstring(self, doc):
1138 def _default_arguments_from_docstring(self, doc):
1121 """Parse the first line of docstring for call signature.
1139 """Parse the first line of docstring for call signature.
1122
1140
1123 Docstring should be of the form 'min(iterable[, key=func])\n'.
1141 Docstring should be of the form 'min(iterable[, key=func])\n'.
1124 It can also parse Cython docstrings of the form
1142 It can also parse Cython docstrings of the form
1125 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
1143 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
1126 """
1144 """
1127 if doc is None:
1145 if doc is None:
1128 return []
1146 return []
1129
1147
1130 # care only about the first line
1148 # care only about the first line
1131 line = doc.lstrip().splitlines()[0]
1149 line = doc.lstrip().splitlines()[0]
1132
1150
1133 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1151 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1134 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
1152 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
1135 sig = self.docstring_sig_re.search(line)
1153 sig = self.docstring_sig_re.search(line)
1136 if sig is None:
1154 if sig is None:
1137 return []
1155 return []
1138 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
1156 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
1139 sig = sig.groups()[0].split(',')
1157 sig = sig.groups()[0].split(',')
1140 ret = []
1158 ret = []
1141 for s in sig:
1159 for s in sig:
1142 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1160 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1143 ret += self.docstring_kwd_re.findall(s)
1161 ret += self.docstring_kwd_re.findall(s)
1144 return ret
1162 return ret
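import re

# Standalone sketch of the docstring parsing above: the same two regular
# expressions pull keyword-argument names out of the first line of a
# docstring; `kwargs_from_docstring_line` is a hypothetical helper name.
sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')

def kwargs_from_docstring_line(line):
    sig = sig_re.search(line)
    if sig is None:
        return []
    names = []
    for part in sig.groups()[0].split(','):
        names += kwd_re.findall(part)
    return names

assert kwargs_from_docstring_line('min(iterable[, key=func])') == ['key']
assert kwargs_from_docstring_line(
    'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'
) == ['ncall', 'resume', 'nsplit']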
1145
1163
1146 def _default_arguments(self, obj):
1164 def _default_arguments(self, obj):
1147 """Return the list of default arguments of obj if it is callable,
1165 """Return the list of default arguments of obj if it is callable,
1148 or empty list otherwise."""
1166 or empty list otherwise."""
1149 call_obj = obj
1167 call_obj = obj
1150 ret = []
1168 ret = []
1151 if inspect.isbuiltin(obj):
1169 if inspect.isbuiltin(obj):
1152 pass
1170 pass
1153 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
1171 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
1154 if inspect.isclass(obj):
1172 if inspect.isclass(obj):
1155 # for cython embedsignature=True the constructor docstring
1173 # for cython embedsignature=True the constructor docstring
1156 # belongs to the object itself, not __init__
1174 # belongs to the object itself, not __init__
1157 ret += self._default_arguments_from_docstring(
1175 ret += self._default_arguments_from_docstring(
1158 getattr(obj, '__doc__', ''))
1176 getattr(obj, '__doc__', ''))
1159 # for classes, check for __init__,__new__
1177 # for classes, check for __init__,__new__
1160 call_obj = (getattr(obj, '__init__', None) or
1178 call_obj = (getattr(obj, '__init__', None) or
1161 getattr(obj, '__new__', None))
1179 getattr(obj, '__new__', None))
1162 # for all others, check if they are __call__able
1180 # for all others, check if they are __call__able
1163 elif hasattr(obj, '__call__'):
1181 elif hasattr(obj, '__call__'):
1164 call_obj = obj.__call__
1182 call_obj = obj.__call__
1165 ret += self._default_arguments_from_docstring(
1183 ret += self._default_arguments_from_docstring(
1166 getattr(call_obj, '__doc__', ''))
1184 getattr(call_obj, '__doc__', ''))
1167
1185
1168 _keeps = (inspect.Parameter.KEYWORD_ONLY,
1186 _keeps = (inspect.Parameter.KEYWORD_ONLY,
1169 inspect.Parameter.POSITIONAL_OR_KEYWORD)
1187 inspect.Parameter.POSITIONAL_OR_KEYWORD)
1170
1188
1171 try:
1189 try:
1172 sig = inspect.signature(call_obj)
1190 sig = inspect.signature(call_obj)
1173 ret.extend(k for k, v in sig.parameters.items() if
1191 ret.extend(k for k, v in sig.parameters.items() if
1174 v.kind in _keeps)
1192 v.kind in _keeps)
1175 except ValueError:
1193 except ValueError:
1176 pass
1194 pass
1177
1195
1178 return list(set(ret))
1196 return list(set(ret))
1179
1197
1180 def python_func_kw_matches(self,text):
1198 def python_func_kw_matches(self,text):
1181 """Match named parameters (kwargs) of the last open function"""
1199 """Match named parameters (kwargs) of the last open function"""
1182
1200
1183 if "." in text: # a parameter cannot be dotted
1201 if "." in text: # a parameter cannot be dotted
1184 return []
1202 return []
1185 try: regexp = self.__funcParamsRegex
1203 try: regexp = self.__funcParamsRegex
1186 except AttributeError:
1204 except AttributeError:
1187 regexp = self.__funcParamsRegex = re.compile(r'''
1205 regexp = self.__funcParamsRegex = re.compile(r'''
1188 '.*?(?<!\\)' | # single quoted strings or
1206 '.*?(?<!\\)' | # single quoted strings or
1189 ".*?(?<!\\)" | # double quoted strings or
1207 ".*?(?<!\\)" | # double quoted strings or
1190 \w+ | # identifier
1208 \w+ | # identifier
1191 \S # other characters
1209 \S # other characters
1192 ''', re.VERBOSE | re.DOTALL)
1210 ''', re.VERBOSE | re.DOTALL)
1193 # 1. find the nearest identifier that comes before an unclosed
1211 # 1. find the nearest identifier that comes before an unclosed
1194 # parenthesis before the cursor
1212 # parenthesis before the cursor
1195 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
1213 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
1196 tokens = regexp.findall(self.text_until_cursor)
1214 tokens = regexp.findall(self.text_until_cursor)
1197 iterTokens = reversed(tokens); openPar = 0
1215 iterTokens = reversed(tokens); openPar = 0
1198
1216
1199 for token in iterTokens:
1217 for token in iterTokens:
1200 if token == ')':
1218 if token == ')':
1201 openPar -= 1
1219 openPar -= 1
1202 elif token == '(':
1220 elif token == '(':
1203 openPar += 1
1221 openPar += 1
1204 if openPar > 0:
1222 if openPar > 0:
1205 # found the last unclosed parenthesis
1223 # found the last unclosed parenthesis
1206 break
1224 break
1207 else:
1225 else:
1208 return []
1226 return []
1209 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
1227 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
1210 ids = []
1228 ids = []
1211 isId = re.compile(r'\w+$').match
1229 isId = re.compile(r'\w+$').match
1212
1230
1213 while True:
1231 while True:
1214 try:
1232 try:
1215 ids.append(next(iterTokens))
1233 ids.append(next(iterTokens))
1216 if not isId(ids[-1]):
1234 if not isId(ids[-1]):
1217 ids.pop(); break
1235 ids.pop(); break
1218 if not next(iterTokens) == '.':
1236 if not next(iterTokens) == '.':
1219 break
1237 break
1220 except StopIteration:
1238 except StopIteration:
1221 break
1239 break
1222
1240
1223 # Find all named arguments already assigned to, as to avoid suggesting
1241 # Find all named arguments already assigned to, as to avoid suggesting
1224 # them again
1242 # them again
1225 usedNamedArgs = set()
1243 usedNamedArgs = set()
1226 par_level = -1
1244 par_level = -1
1227 for token, next_token in zip(tokens, tokens[1:]):
1245 for token, next_token in zip(tokens, tokens[1:]):
1228 if token == '(':
1246 if token == '(':
1229 par_level += 1
1247 par_level += 1
1230 elif token == ')':
1248 elif token == ')':
1231 par_level -= 1
1249 par_level -= 1
1232
1250
1233 if par_level != 0:
1251 if par_level != 0:
1234 continue
1252 continue
1235
1253
1236 if next_token != '=':
1254 if next_token != '=':
1237 continue
1255 continue
1238
1256
1239 usedNamedArgs.add(token)
1257 usedNamedArgs.add(token)
1240
1258
1241 # lookup the candidate callable matches either using global_matches
1259 # lookup the candidate callable matches either using global_matches
1242 # or attr_matches for dotted names
1260 # or attr_matches for dotted names
1243 if len(ids) == 1:
1261 if len(ids) == 1:
1244 callableMatches = self.global_matches(ids[0])
1262 callableMatches = self.global_matches(ids[0])
1245 else:
1263 else:
1246 callableMatches = self.attr_matches('.'.join(ids[::-1]))
1264 callableMatches = self.attr_matches('.'.join(ids[::-1]))
1247 argMatches = []
1265 argMatches = []
1248 for callableMatch in callableMatches:
1266 for callableMatch in callableMatches:
1249 try:
1267 try:
1250 namedArgs = self._default_arguments(eval(callableMatch,
1268 namedArgs = self._default_arguments(eval(callableMatch,
1251 self.namespace))
1269 self.namespace))
1252 except:
1270 except:
1253 continue
1271 continue
1254
1272
1255 # Remove used named arguments from the list, no need to show twice
1273 # Remove used named arguments from the list, no need to show twice
1256 for namedArg in set(namedArgs) - usedNamedArgs:
1274 for namedArg in set(namedArgs) - usedNamedArgs:
1257 if namedArg.startswith(text):
1275 if namedArg.startswith(text):
1258 argMatches.append(u"%s=" %namedArg)
1276 argMatches.append(u"%s=" %namedArg)
1259 return argMatches
1277 return argMatches
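import re

# Standalone sketch of step 1 above: tokenize the text before the cursor and
# walk the tokens backwards until the last unclosed '(' to recover the name
# of the callable whose keyword arguments should be completed.
# `last_open_call` is a hypothetical helper, not part of IPython.
token_re = re.compile(r'''
    '.*?(?<!\\)' |  # single quoted strings or
    ".*?(?<!\\)" |  # double quoted strings or
    \w+          |  # identifier
    \S              # other characters
    ''', re.VERBOSE | re.DOTALL)

def last_open_call(text_until_cursor):
    tokens = token_re.findall(text_until_cursor)
    it = reversed(tokens)
    open_par = 0
    for token in it:
        if token == ')':
            open_par -= 1
        elif token == '(':
            open_par += 1
            if open_par > 0:
                break
    else:
        return None
    prev = next(it, None)
    return prev if prev and re.match(r'\w+$', prev) else None

assert last_open_call('foo (1+bar(x), pa') == 'foo'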
1260
1278
1261 def dict_key_matches(self, text):
1279 def dict_key_matches(self, text):
1262 "Match string keys in a dictionary, after e.g. 'foo[' "
1280 "Match string keys in a dictionary, after e.g. 'foo[' "
1263 def get_keys(obj):
1281 def get_keys(obj):
1264 # Objects can define their own completions by defining an
1282 # Objects can define their own completions by defining an
1265 # _ipython_key_completions_() method.
1283 # _ipython_key_completions_() method.
1266 method = get_real_method(obj, '_ipython_key_completions_')
1284 method = get_real_method(obj, '_ipython_key_completions_')
1267 if method is not None:
1285 if method is not None:
1268 return method()
1286 return method()
1269
1287
1270 # Special case some common in-memory dict-like types
1288 # Special case some common in-memory dict-like types
1271 if isinstance(obj, dict) or\
1289 if isinstance(obj, dict) or\
1272 _safe_isinstance(obj, 'pandas', 'DataFrame'):
1290 _safe_isinstance(obj, 'pandas', 'DataFrame'):
1273 try:
1291 try:
1274 return list(obj.keys())
1292 return list(obj.keys())
1275 except Exception:
1293 except Exception:
1276 return []
1294 return []
1277 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
1295 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
1278 _safe_isinstance(obj, 'numpy', 'void'):
1296 _safe_isinstance(obj, 'numpy', 'void'):
1279 return obj.dtype.names or []
1297 return obj.dtype.names or []
1280 return []
1298 return []
1281
1299
1282 try:
1300 try:
1283 regexps = self.__dict_key_regexps
1301 regexps = self.__dict_key_regexps
1284 except AttributeError:
1302 except AttributeError:
1285 dict_key_re_fmt = r'''(?x)
1303 dict_key_re_fmt = r'''(?x)
1286 ( # match dict-referring expression wrt greedy setting
1304 ( # match dict-referring expression wrt greedy setting
1287 %s
1305 %s
1288 )
1306 )
1289 \[ # open bracket
1307 \[ # open bracket
1290 \s* # and optional whitespace
1308 \s* # and optional whitespace
1291 ([uUbB]? # string prefix (r not handled)
1309 ([uUbB]? # string prefix (r not handled)
1292 (?: # unclosed string
1310 (?: # unclosed string
1293 '(?:[^']|(?<!\\)\\')*
1311 '(?:[^']|(?<!\\)\\')*
1294 |
1312 |
1295 "(?:[^"]|(?<!\\)\\")*
1313 "(?:[^"]|(?<!\\)\\")*
1296 )
1314 )
1297 )?
1315 )?
1298 $
1316 $
1299 '''
1317 '''
1300 regexps = self.__dict_key_regexps = {
1318 regexps = self.__dict_key_regexps = {
1301 False: re.compile(dict_key_re_fmt % '''
1319 False: re.compile(dict_key_re_fmt % '''
1302 # identifiers separated by .
1320 # identifiers separated by .
1303 (?!\d)\w+
1321 (?!\d)\w+
1304 (?:\.(?!\d)\w+)*
1322 (?:\.(?!\d)\w+)*
1305 '''),
1323 '''),
1306 True: re.compile(dict_key_re_fmt % '''
1324 True: re.compile(dict_key_re_fmt % '''
1307 .+
1325 .+
1308 ''')
1326 ''')
1309 }
1327 }
1310
1328
1311 match = regexps[self.greedy].search(self.text_until_cursor)
1329 match = regexps[self.greedy].search(self.text_until_cursor)
1312 if match is None:
1330 if match is None:
1313 return []
1331 return []
1314
1332
1315 expr, prefix = match.groups()
1333 expr, prefix = match.groups()
1316 try:
1334 try:
1317 obj = eval(expr, self.namespace)
1335 obj = eval(expr, self.namespace)
1318 except Exception:
1336 except Exception:
1319 try:
1337 try:
1320 obj = eval(expr, self.global_namespace)
1338 obj = eval(expr, self.global_namespace)
1321 except Exception:
1339 except Exception:
1322 return []
1340 return []
1323
1341
1324 keys = get_keys(obj)
1342 keys = get_keys(obj)
1325 if not keys:
1343 if not keys:
1326 return keys
1344 return keys
1327 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
1345 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
1328 if not matches:
1346 if not matches:
1329 return matches
1347 return matches
1330
1348
1331 # get the cursor position of
1349 # get the cursor position of
1332 # - the text being completed
1350 # - the text being completed
1333 # - the start of the key text
1351 # - the start of the key text
1334 # - the start of the completion
1352 # - the start of the completion
1335 text_start = len(self.text_until_cursor) - len(text)
1353 text_start = len(self.text_until_cursor) - len(text)
1336 if prefix:
1354 if prefix:
1337 key_start = match.start(2)
1355 key_start = match.start(2)
1338 completion_start = key_start + token_offset
1356 completion_start = key_start + token_offset
1339 else:
1357 else:
1340 key_start = completion_start = match.end()
1358 key_start = completion_start = match.end()
1341
1359
1342 # grab the leading prefix, to make sure all completions start with `text`
1360 # grab the leading prefix, to make sure all completions start with `text`
1343 if text_start > key_start:
1361 if text_start > key_start:
1344 leading = ''
1362 leading = ''
1345 else:
1363 else:
1346 leading = text[text_start:completion_start]
1364 leading = text[text_start:completion_start]
1347
1365
1348 # the index of the `[` character
1366 # the index of the `[` character
1349 bracket_idx = match.end(1)
1367 bracket_idx = match.end(1)
1350
1368
1351 # append closing quote and bracket as appropriate
1369 # append closing quote and bracket as appropriate
1352 # this is *not* appropriate if the opening quote or bracket is outside
1370 # this is *not* appropriate if the opening quote or bracket is outside
1353 # the text given to this method
1371 # the text given to this method
1354 suf = ''
1372 suf = ''
1355 continuation = self.line_buffer[len(self.text_until_cursor):]
1373 continuation = self.line_buffer[len(self.text_until_cursor):]
1356 if key_start > text_start and closing_quote:
1374 if key_start > text_start and closing_quote:
1357 # quotes were opened inside text, maybe close them
1375 # quotes were opened inside text, maybe close them
1358 if continuation.startswith(closing_quote):
1376 if continuation.startswith(closing_quote):
1359 continuation = continuation[len(closing_quote):]
1377 continuation = continuation[len(closing_quote):]
1360 else:
1378 else:
1361 suf += closing_quote
1379 suf += closing_quote
1362 if bracket_idx > text_start:
1380 if bracket_idx > text_start:
1363 # brackets were opened inside text, maybe close them
1381 # brackets were opened inside text, maybe close them
1364 if not continuation.startswith(']'):
1382 if not continuation.startswith(']'):
1365 suf += ']'
1383 suf += ']'
1366
1384
1367 return [leading + k + suf for k in matches]
1385 return [leading + k + suf for k in matches]
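# Minimal example of the `_ipython_key_completions_` hook consumed by
# `get_keys` above; the `Bag` class is purely illustrative.
class Bag:
    def __init__(self, data):
        self._data = dict(data)

    def __getitem__(self, key):
        return self._data[key]

    def _ipython_key_completions_(self):
        # keys offered after typing `bag['<tab>` in IPython
        return list(self._data)

bag = Bag({'alpha': 1, 'beta': 2})
assert sorted(bag._ipython_key_completions_()) == ['alpha', 'beta']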
1368
1386
1369 def unicode_name_matches(self, text):
1387 def unicode_name_matches(self, text):
1370 u"""Match Latex-like syntax for unicode characters base
1388 u"""Match Latex-like syntax for unicode characters base
1371 on the name of the character.
1389 on the name of the character.
1372
1390
1373 This does ``\\GREEK SMALL LETTER ETA`` -> ``η``
1391 This does ``\\GREEK SMALL LETTER ETA`` -> ``η``
1374
1392
1375 Works only on valid Python 3 identifiers, or on combining characters that
1393 Works only on valid Python 3 identifiers, or on combining characters that
1376 will combine to form a valid identifier.
1394 will combine to form a valid identifier.
1377
1395
1378 Used on Python 3 only.
1396 Used on Python 3 only.
1379 """
1397 """
1380 slashpos = text.rfind('\\')
1398 slashpos = text.rfind('\\')
1381 if slashpos > -1:
1399 if slashpos > -1:
1382 s = text[slashpos+1:]
1400 s = text[slashpos+1:]
1383 try :
1401 try :
1384 unic = unicodedata.lookup(s)
1402 unic = unicodedata.lookup(s)
1385 # allow combining chars
1403 # allow combining chars
1386 if ('a'+unic).isidentifier():
1404 if ('a'+unic).isidentifier():
1387 return '\\'+s,[unic]
1405 return '\\'+s,[unic]
1388 except KeyError:
1406 except KeyError:
1389 pass
1407 pass
1390 return u'', []
1408 return u'', []
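import unicodedata

# What the lookup above resolves: a backslash-escaped unicode character name
# becomes the character itself, provided it can appear in an identifier.
eta = unicodedata.lookup('GREEK SMALL LETTER ETA')
assert eta == '\N{GREEK SMALL LETTER ETA}'
assert ('a' + eta).isidentifier()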
1391
1409
1392
1410
1393 def latex_matches(self, text):
1411 def latex_matches(self, text):
1394 u"""Match Latex syntax for unicode characters.
1412 u"""Match Latex syntax for unicode characters.
1395
1413
1396 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``α``
1414 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``α``
1397
1415
1398 Used on Python 3 only.
1416 Used on Python 3 only.
1399 """
1417 """
1400 slashpos = text.rfind('\\')
1418 slashpos = text.rfind('\\')
1401 if slashpos > -1:
1419 if slashpos > -1:
1402 s = text[slashpos:]
1420 s = text[slashpos:]
1403 if s in latex_symbols:
1421 if s in latex_symbols:
1404 # Try to complete a full latex symbol to unicode
1422 # Try to complete a full latex symbol to unicode
1405 # \\alpha -> α
1423 # \\alpha -> α
1406 return s, [latex_symbols[s]]
1424 return s, [latex_symbols[s]]
1407 else:
1425 else:
1408 # If a user has partially typed a latex symbol, give them
1426 # If a user has partially typed a latex symbol, give them
1409 # a full list of options \al -> [\aleph, \alpha]
1427 # a full list of options \al -> [\aleph, \alpha]
1410 matches = [k for k in latex_symbols if k.startswith(s)]
1428 matches = [k for k in latex_symbols if k.startswith(s)]
1411 return s, matches
1429 return s, matches
1412 return u'', []
1430 return u'', []
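# Standalone sketch of the two behaviours documented above, using a tiny
# stand-in for the real `latex_symbols` table (two entries, illustrative only).
symbols = {'\\alpha': '\N{GREEK SMALL LETTER ALPHA}',
           '\\aleph': '\N{ALEF SYMBOL}'}

def sketch_latex_matches(text):
    slashpos = text.rfind('\\')
    if slashpos == -1:
        return '', []
    s = text[slashpos:]
    if s in symbols:
        # full symbol -> the unicode character itself
        return s, [symbols[s]]
    # partial symbol -> all candidate completions
    return s, [k for k in symbols if k.startswith(s)]

assert sketch_latex_matches('x = \\alpha') == ('\\alpha', ['\N{GREEK SMALL LETTER ALPHA}'])
assert sorted(sketch_latex_matches('\\al')[1]) == ['\\aleph', '\\alpha']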
1413
1431
1414 def dispatch_custom_completer(self, text):
1432 def dispatch_custom_completer(self, text):
1415 if not self.custom_completers:
1433 if not self.custom_completers:
1416 return
1434 return
1417
1435
1418 line = self.line_buffer
1436 line = self.line_buffer
1419 if not line.strip():
1437 if not line.strip():
1420 return None
1438 return None
1421
1439
1422 # Create a little structure to pass all the relevant information about
1440 # Create a little structure to pass all the relevant information about
1423 # the current completion to any custom completer.
1441 # the current completion to any custom completer.
1424 event = SimpleNamespace()
1442 event = SimpleNamespace()
1425 event.line = line
1443 event.line = line
1426 event.symbol = text
1444 event.symbol = text
1427 cmd = line.split(None,1)[0]
1445 cmd = line.split(None,1)[0]
1428 event.command = cmd
1446 event.command = cmd
1429 event.text_until_cursor = self.text_until_cursor
1447 event.text_until_cursor = self.text_until_cursor
1430
1448
1431 # for foo etc, try also to find completer for %foo
1449 # for foo etc, try also to find completer for %foo
1432 if not cmd.startswith(self.magic_escape):
1450 if not cmd.startswith(self.magic_escape):
1433 try_magic = self.custom_completers.s_matches(
1451 try_magic = self.custom_completers.s_matches(
1434 self.magic_escape + cmd)
1452 self.magic_escape + cmd)
1435 else:
1453 else:
1436 try_magic = []
1454 try_magic = []
1437
1455
1438 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1456 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1439 try_magic,
1457 try_magic,
1440 self.custom_completers.flat_matches(self.text_until_cursor)):
1458 self.custom_completers.flat_matches(self.text_until_cursor)):
1441 try:
1459 try:
1442 res = c(event)
1460 res = c(event)
1443 if res:
1461 if res:
1444 # first, try case sensitive match
1462 # first, try case sensitive match
1445 withcase = [cast_unicode_py2(r) for r in res if r.startswith(text)]
1463 withcase = [cast_unicode_py2(r) for r in res if r.startswith(text)]
1446 if withcase:
1464 if withcase:
1447 return withcase
1465 return withcase
1448 # if none, then case insensitive ones are ok too
1466 # if none, then case insensitive ones are ok too
1449 text_low = text.lower()
1467 text_low = text.lower()
1450 return [cast_unicode_py2(r) for r in res if r.lower().startswith(text_low)]
1468 return [cast_unicode_py2(r) for r in res if r.lower().startswith(text_low)]
1451 except TryNext:
1469 except TryNext:
1452 pass
1470 pass
1453
1471
1454 return None
1472 return None
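# Sketch of a custom completer of the kind dispatched above: it receives the
# `event` namespace built in this method (``line``, ``symbol``, ``command``,
# ``text_until_cursor``) and returns candidate strings, or raises TryNext to
# let other completers run. The 'apt' example and its candidates are made up.
from IPython.core.error import TryNext

def apt_completer(self, event):
    if event.command in ('apt', '%apt'):
        return ['install', 'remove', 'update']
    raise TryNext

# In a running IPython session this would typically be registered with e.g.
# get_ipython().set_hook('complete_command', apt_completer, str_key='apt')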
1455
1473
1456 def completions(self, text: str, offset: int)->Iterator[Completion]:
1474 def completions(self, text: str, offset: int)->Iterator[Completion]:
1457 """
1475 """
1458 Returns an iterator over the possible completions
1476 Returns an iterator over the possible completions
1459
1477
1460 .. warning:: Unstable
1478 .. warning:: Unstable
1461
1479
1462 This function is unstable, API may change without warning.
1480 This function is unstable, API may change without warning.
1463 It will also raise unless used in the proper context manager.
1481 It will also raise unless used in the proper context manager.
1464
1482
1465 Parameters
1483 Parameters
1466 ----------
1484 ----------
1467
1485
1468 text : str
1486 text : str
1469 Full text of the current input, a multi-line string.
1487 Full text of the current input, a multi-line string.
1470 offset : int
1488 offset : int
1471 Integer representing the position of the cursor in ``text``. Offset
1489 Integer representing the position of the cursor in ``text``. Offset
1472 is 0-based.
1490 is 0-based.
1473
1491
1474 Yields
1492 Yields
1475 ------
1493 ------
1476 :any:`Completion` object
1494 :any:`Completion` object
1477
1495
1478
1496
1479 The cursor on a text can either be seen as being "in between"
1497 The cursor on a text can either be seen as being "in between"
1480 characters or "On" a character depending on the interface visible to
1498 characters or "On" a character depending on the interface visible to
1481 the user. For consistency the cursor being on "in between" characters X
1499 the user. For consistency the cursor being on "in between" characters X
1482 and Y is equivalent to the cursor being "on" character Y, that is to say
1500 and Y is equivalent to the cursor being "on" character Y, that is to say
1483 the character the cursor is on is considered as being after the cursor.
1501 the character the cursor is on is considered as being after the cursor.
1484
1502
1485 Combining characters may span more than one position in the
1503 Combining characters may span more than one position in the
1486 text.
1504 text.
1487
1505
1488
1506
1489 .. note::
1507 .. note::
1490
1508
1491 If ``IPCompleter.debug`` is :any:`True` will yield a ``--jedi/ipython--``
1509 If ``IPCompleter.debug`` is :any:`True` will yield a ``--jedi/ipython--``
1492 fake Completion token to distinguish completion returned by Jedi
1510 fake Completion token to distinguish completion returned by Jedi
1493 and usual IPython completion.
1511 and usual IPython completion.
1494
1512
1495 """
1513 """
1496 warnings.warn("_complete is a provisional API (as of IPython 6.0). "
1514 warnings.warn("_complete is a provisional API (as of IPython 6.0). "
1497 "It may change without warnings. "
1515 "It may change without warnings. "
1498 "Use in corresponding context manager.",
1516 "Use in corresponding context manager.",
1499 category=ProvisionalCompleterWarning, stacklevel=2)
1517 category=ProvisionalCompleterWarning, stacklevel=2)
1500
1518
1501 # Possible Improvements / Known limitation
1519 # Possible Improvements / Known limitation
1502 ##########################################
1520 ##########################################
1503 # Completions may be identical even if they have different ranges and
1521 # Completions may be identical even if they have different ranges and
1504 # text. For example:
1522 # text. For example:
1505 # >>> a=1
1523 # >>> a=1
1506 # >>> a.<tab>
1524 # >>> a.<tab>
1507 # May return:
1525 # May return:
1508 # - `a.real` from 0 to 2
1526 # - `a.real` from 0 to 2
1509 # - `.real` from 1 to 2
1527 # - `.real` from 1 to 2
1510 # the current code does not (yet) check for such equivalence
1528 # the current code does not (yet) check for such equivalence
1511 seen = set()
1529 seen = set()
1512 for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
1530 for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
1513 if c and (c in seen):
1531 if c and (c in seen):
1514 continue
1532 continue
1515 yield c
1533 yield c
1516 seen.add(c)
1534 seen.add(c)
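# Example of consuming this provisional API; it assumes a running IPython
# session (so `get_ipython()` returns a shell rather than None).
from IPython import get_ipython
from IPython.core.completer import provisionalcompleter

ip = get_ipython()
ip.run_cell("data = {'alpha': 1}")
with provisionalcompleter():
    comps = list(ip.Completer.completions("data.ke", 7))
# each item is a `Completion` carrying .start, .end and .text (e.g. 'keys')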
1517
1535
1518 def _completions(self, full_text: str, offset: int, *, _timeout)->Iterator[Completion]:
1536 def _completions(self, full_text: str, offset: int, *, _timeout)->Iterator[Completion]:
1519 """
1537 """
1520 Core completion module. Same signature as :any:`completions`, with the
1538 Core completion module. Same signature as :any:`completions`, with the
1521 extra ``_timeout`` parameter (in seconds).
1539 extra ``_timeout`` parameter (in seconds).
1522
1540
1523
1541
1524 Computing jedi's completion ``.type`` can be quite expensive (it is a
1542 Computing jedi's completion ``.type`` can be quite expensive (it is a
1525 lazy property) and can require some warm-up, more warm up than just
1543 lazy property) and can require some warm-up, more warm up than just
1526 computing the ``name`` of a completion. The warm-up can be:
1544 computing the ``name`` of a completion. The warm-up can be:
1527
1545
1528 - Long warm-up the first time a module is encountered after
1546 - Long warm-up the first time a module is encountered after
1529 install/update: actually building the parse/inference tree.
1547 install/update: actually building the parse/inference tree.
1530
1548
1531 - First time the module is encountered in a session: load the tree from
1549 - First time the module is encountered in a session: load the tree from
1532 disk.
1550 disk.
1533
1551
1534 We don't want to block completions for tens of seconds so we give the
1552 We don't want to block completions for tens of seconds so we give the
1535 completer a "budget" of ``_timeout`` seconds per invocation to compute
1553 completer a "budget" of ``_timeout`` seconds per invocation to compute
1536 completion types; the completions that have not yet been computed will
1554 completion types; the completions that have not yet been computed will
1537 be marked as "unknown" and will have a chance to be computed next round
1555 be marked as "unknown" and will have a chance to be computed next round
1538 as things get cached.
1556 as things get cached.
1539
1557
1540 Keep in mind that Jedi is not the only thing processing the completions, so
1558 Keep in mind that Jedi is not the only thing processing the completions, so
1541 keep the timeout short-ish: if we take more than 0.3 seconds we still
1559 keep the timeout short-ish: if we take more than 0.3 seconds we still
1542 have lots of processing to do.
1560 have lots of processing to do.
1543
1561
1544 """
1562 """
1545 deadline = time.monotonic() + _timeout
1563 deadline = time.monotonic() + _timeout
1546
1564
1547
1565
1548 before = full_text[:offset]
1566 before = full_text[:offset]
1549 cursor_line, cursor_column = position_to_cursor(full_text, offset)
1567 cursor_line, cursor_column = position_to_cursor(full_text, offset)
1550
1568
1551 matched_text, matches, matches_origin, jedi_matches = self._complete(
1569 matched_text, matches, matches_origin, jedi_matches = self._complete(
1552 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column)
1570 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column)
1553
1571
1554 iter_jm = iter(jedi_matches)
1572 iter_jm = iter(jedi_matches)
1555 if _timeout:
1573 if _timeout:
1556 for jm in iter_jm:
1574 for jm in iter_jm:
1557 delta = len(jm.name_with_symbols) - len(jm.complete)
1575 delta = len(jm.name_with_symbols) - len(jm.complete)
1558 yield Completion(start=offset - delta,
1576 yield Completion(start=offset - delta,
1559 end=offset,
1577 end=offset,
1560 text=jm.name_with_symbols,
1578 text=jm.name_with_symbols,
1561 type=jm.type,
1579 type=jm.type,
1562 _origin='jedi')
1580 _origin='jedi')
1563
1581
1564 if time.monotonic() > deadline:
1582 if time.monotonic() > deadline:
1565 break
1583 break
1566
1584
1567 for jm in iter_jm:
1585 for jm in iter_jm:
1568 delta = len(jm.name_with_symbols) - len(jm.complete)
1586 delta = len(jm.name_with_symbols) - len(jm.complete)
1569 yield Completion(start=offset - delta,
1587 yield Completion(start=offset - delta,
1570 end=offset,
1588 end=offset,
1571 text=jm.name_with_symbols,
1589 text=jm.name_with_symbols,
1572 type='<unknown>', # don't compute type for speed
1590 type='<unknown>', # don't compute type for speed
1573 _origin='jedi')
1591 _origin='jedi')
1574
1592
1575
1593
1576 start_offset = before.rfind(matched_text)
1594 start_offset = before.rfind(matched_text)
1577
1595
1578 # TODO:
1596 # TODO:
1579 # Suppress this, right now just for debug.
1597 # Suppress this, right now just for debug.
1580 if jedi_matches and matches and self.debug:
1598 if jedi_matches and matches and self.debug:
1581 yield Completion(start=start_offset, end=offset, text='--jedi/ipython--', _origin='debug')
1599 yield Completion(start=start_offset, end=offset, text='--jedi/ipython--', _origin='debug')
1582
1600
1583 # I'm unsure if this is always true, so let's assert and see if it
1601 # I'm unsure if this is always true, so let's assert and see if it
1584 # crashes
1602 # crashes
1585 assert before.endswith(matched_text)
1603 assert before.endswith(matched_text)
1586 for m, t in zip(matches, matches_origin):
1604 for m, t in zip(matches, matches_origin):
1587 yield Completion(start=start_offset, end=offset, text=m, _origin=t)
1605 yield Completion(start=start_offset, end=offset, text=m, _origin=t)
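import time

# Standalone sketch of the time-budget pattern used above: spend at most
# `budget` seconds computing an expensive per-item attribute, then fall back
# to a cheap '<unknown>' placeholder for whatever is left in the iterator.
def annotate_with_budget(items, expensive, budget=0.05):
    deadline = time.monotonic() + budget
    it = iter(items)
    results = []
    for item in it:
        results.append((item, expensive(item)))
        if time.monotonic() > deadline:
            break
    results.extend((item, '<unknown>') for item in it)
    return results

# annotate_with_budget(range(5), lambda x: x * x) computes real values until
# the deadline passes, then labels the remaining items '<unknown>'.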
1588
1606
1589
1607
1590 def complete(self, text=None, line_buffer=None, cursor_pos=None):
1608 def complete(self, text=None, line_buffer=None, cursor_pos=None):
1591 """Find completions for the given text and line context.
1609 """Find completions for the given text and line context.
1592
1610
1593 Note that both the text and the line_buffer are optional, but at least
1611 Note that both the text and the line_buffer are optional, but at least
1594 one of them must be given.
1612 one of them must be given.
1595
1613
1596 Parameters
1614 Parameters
1597 ----------
1615 ----------
1598 text : string, optional
1616 text : string, optional
1599 Text to perform the completion on. If not given, the line buffer
1617 Text to perform the completion on. If not given, the line buffer
1600 is split using the instance's CompletionSplitter object.
1618 is split using the instance's CompletionSplitter object.
1601
1619
1602 line_buffer : string, optional
1620 line_buffer : string, optional
1603 If not given, the completer attempts to obtain the current line
1621 If not given, the completer attempts to obtain the current line
1604 buffer via readline. This keyword allows clients which are
1622 buffer via readline. This keyword allows clients which are
1605 requesting text completions in non-readline contexts to inform
1623 requesting text completions in non-readline contexts to inform
1606 the completer of the entire text.
1624 the completer of the entire text.
1607
1625
1608 cursor_pos : int, optional
1626 cursor_pos : int, optional
1609 Index of the cursor in the full line buffer. Should be provided by
1627 Index of the cursor in the full line buffer. Should be provided by
1610 remote frontends where kernel has no access to frontend state.
1628 remote frontends where kernel has no access to frontend state.
1611
1629
1612 Returns
1630 Returns
1613 -------
1631 -------
1614 text : str
1632 text : str
1615 Text that was actually used in the completion.
1633 Text that was actually used in the completion.
1616
1634
1617 matches : list
1635 matches : list
1618 A list of completion matches.
1636 A list of completion matches.
1619
1637
1620
1638
1621 .. note::
1639 .. note::
1622
1640
1623 This API is likely to be deprecated and replaced by
1641 This API is likely to be deprecated and replaced by
1624 :any:`IPCompleter.completions` in the future.
1642 :any:`IPCompleter.completions` in the future.
1625
1643
1626
1644
1627 """
1645 """
1628 warnings.warn('`Completer.complete` is pending deprecation since '
1646 warnings.warn('`Completer.complete` is pending deprecation since '
1629 'IPython 6.0 and will be replaced by `Completer.completions`.',
1647 'IPython 6.0 and will be replaced by `Completer.completions`.',
1630 PendingDeprecationWarning)
1648 PendingDeprecationWarning)
1631 # potential todo: fold the 3rd throw-away argument of _complete
1649 # potential todo: fold the 3rd throw-away argument of _complete
1632 # into the first 2 ones.
1650 # into the first 2 ones.
1633 return self._complete(line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0)[:2]
1651 return self._complete(line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0)[:2]
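# Usage sketch of the (pending-deprecation) API above, assuming a running
# IPython session: pass the whole line plus the cursor position and get back
# the fragment that was completed together with the candidate matches.
from IPython import get_ipython

ip = get_ipython()
ip.run_cell("alpha = 1")
text, matches = ip.Completer.complete(line_buffer="print(alp", cursor_pos=9)
# `text` is the completed fragment ('alp'); `matches` should include 'alpha'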
1634
1652
1635 def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
1653 def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
1636 full_text=None, return_jedi_results=True) -> (str, List[str], List[object]):
1654 full_text=None, return_jedi_results=True) -> (str, List[str], List[object]):
1637 """
1655 """
1638
1656
1639 Like complete but can also return raw Jedi completions as well as the
1657 Like complete but can also return raw Jedi completions as well as the
1640 origin of the completion text. This could (and should) be made much
1658 origin of the completion text. This could (and should) be made much
1641 cleaner but that will be simpler once we drop the old (and stateful)
1659 cleaner but that will be simpler once we drop the old (and stateful)
1642 :any:`complete` API.
1660 :any:`complete` API.
1643
1661
1644
1662
1645 With the current provisional API, cursor_pos acts (depending on the
1663 With the current provisional API, cursor_pos acts (depending on the
1646 caller) either as the offset in ``text`` or ``line_buffer``, or as the
1664 caller) either as the offset in ``text`` or ``line_buffer``, or as the
1647 ``column`` when passing multiline strings; this could/should be renamed
1665 ``column`` when passing multiline strings; this could/should be renamed
1648 but would add extra noise.
1666 but would add extra noise.
1649 """
1667 """
1650
1668
1651 # if the cursor position isn't given, the only sane assumption we can
1669 # if the cursor position isn't given, the only sane assumption we can
1652 # make is that it's at the end of the line (the common case)
1670 # make is that it's at the end of the line (the common case)
1653 if cursor_pos is None:
1671 if cursor_pos is None:
1654 cursor_pos = len(line_buffer) if text is None else len(text)
1672 cursor_pos = len(line_buffer) if text is None else len(text)
1655
1673
1656 if self.use_main_ns:
1674 if self.use_main_ns:
1657 self.namespace = __main__.__dict__
1675 self.namespace = __main__.__dict__
1658
1676
1659 # if text is either None or an empty string, rely on the line buffer
1677 # if text is either None or an empty string, rely on the line buffer
1660 if (not line_buffer) and full_text:
1678 if (not line_buffer) and full_text:
1661 line_buffer = full_text.split('\n')[cursor_line]
1679 line_buffer = full_text.split('\n')[cursor_line]
1662 if not text:
1680 if not text:
1663 text = self.splitter.split_line(line_buffer, cursor_pos)
1681 text = self.splitter.split_line(line_buffer, cursor_pos)
1664
1682
1665 base_text = text if not line_buffer else line_buffer[:cursor_pos]
1683 base_text = text if not line_buffer else line_buffer[:cursor_pos]
1666 latex_text, latex_matches = self.latex_matches(base_text)
1684 latex_text, latex_matches = self.latex_matches(base_text)
1667 if latex_matches:
1685 if latex_matches:
1668 return latex_text, latex_matches, ['latex_matches']*len(latex_matches), ()
1686 return latex_text, latex_matches, ['latex_matches']*len(latex_matches), ()
1669 name_text = ''
1687 name_text = ''
1670 name_matches = []
1688 name_matches = []
1671 for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
1689 for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
1672 name_text, name_matches = meth(base_text)
1690 name_text, name_matches = meth(base_text)
1673 if name_text:
1691 if name_text:
1674 return name_text, name_matches, [meth.__qualname__]*len(name_matches), {}
1692 return name_text, name_matches, [meth.__qualname__]*len(name_matches), {}
1675
1693
1676
1694
1677 # If no line buffer is given, assume the input text is all there was
1695 # If no line buffer is given, assume the input text is all there was
1678 if line_buffer is None:
1696 if line_buffer is None:
1679 line_buffer = text
1697 line_buffer = text
1680
1698
1681 self.line_buffer = line_buffer
1699 self.line_buffer = line_buffer
1682 self.text_until_cursor = self.line_buffer[:cursor_pos]
1700 self.text_until_cursor = self.line_buffer[:cursor_pos]
1683
1701
1684 # Start with a clean slate of completions
1702 # Start with a clean slate of completions
1685 matches = []
1703 matches = []
1686 custom_res = self.dispatch_custom_completer(text)
1704 custom_res = self.dispatch_custom_completer(text)
1687 # FIXME: we should extend our api to return a dict with completions for
1705 # FIXME: we should extend our api to return a dict with completions for
1688 # different types of objects. The rlcomplete() method could then
1706 # different types of objects. The rlcomplete() method could then
1689 # simply collapse the dict into a list for readline, but we'd have
1707 # simply collapse the dict into a list for readline, but we'd have
1690 # richer completion semantics in other environments.
1708 # richer completion semantics in other environments.
1691 completions = ()
1709 completions = ()
1692 if self.use_jedi and return_jedi_results:
1710 if self.use_jedi and return_jedi_results:
1693 if not full_text:
1711 if not full_text:
1694 full_text = line_buffer
1712 full_text = line_buffer
1695 completions = self._jedi_matches(
1713 completions = self._jedi_matches(
1696 cursor_pos, cursor_line, full_text)
1714 cursor_pos, cursor_line, full_text)
1697 if custom_res is not None:
1715 if custom_res is not None:
1698 # did custom completers produce something?
1716 # did custom completers produce something?
1699 matches = [(m, 'custom') for m in custom_res]
1717 matches = [(m, 'custom') for m in custom_res]
1700 else:
1718 else:
1701 # Extend the list of completions with the results of each
1719 # Extend the list of completions with the results of each
1702 # matcher, so we return results to the user from all
1720 # matcher, so we return results to the user from all
1703 # namespaces.
1721 # namespaces.
1704 if self.merge_completions:
1722 if self.merge_completions:
1705 matches = []
1723 matches = []
1706 for matcher in self.matchers:
1724 for matcher in self.matchers:
1707 try:
1725 try:
1708 matches.extend([(m, matcher.__qualname__)
1726 matches.extend([(m, matcher.__qualname__)
1709 for m in matcher(text)])
1727 for m in matcher(text)])
1710 except:
1728 except:
1711 # Show the ugly traceback if the matcher causes an
1729 # Show the ugly traceback if the matcher causes an
1712 # exception, but do NOT crash the kernel!
1730 # exception, but do NOT crash the kernel!
1713 sys.excepthook(*sys.exc_info())
1731 sys.excepthook(*sys.exc_info())
1714 else:
1732 else:
1715 for matcher in self.matchers:
1733 for matcher in self.matchers:
1716 matches = [(m, matcher.__qualname__)
1734 matches = [(m, matcher.__qualname__)
1717 for m in matcher(text)]
1735 for m in matcher(text)]
1718 if matches:
1736 if matches:
1719 break
1737 break
1720 seen = set()
1738 seen = set()
1721 filtered_matches = set()
1739 filtered_matches = set()
1722 for m in matches:
1740 for m in matches:
1723 t, c = m
1741 t, c = m
1724 if t not in seen:
1742 if t not in seen:
1725 filtered_matches.add(m)
1743 filtered_matches.add(m)
1726 seen.add(t)
1744 seen.add(t)
1727
1745
1728 filtered_matches = sorted(
1746 filtered_matches = sorted(
1729 set(filtered_matches), key=lambda x: completions_sorting_key(x[0]))
1747 set(filtered_matches), key=lambda x: completions_sorting_key(x[0]))
1730
1748
1731 matches = [m[0] for m in filtered_matches]
1749 matches = [m[0] for m in filtered_matches]
1732 origins = [m[1] for m in filtered_matches]
1750 origins = [m[1] for m in filtered_matches]
1733
1751
1734 self.matches = matches
1752 self.matches = matches
1735
1753
1736 return text, matches, origins, completions
1754 return text, matches, origins, completions
@@ -1,822 +1,843 b''
1 # encoding: utf-8
1 # encoding: utf-8
2 """Tests for the IPython tab-completion machinery."""
2 """Tests for the IPython tab-completion machinery."""
3
3
4 # Copyright (c) IPython Development Team.
4 # Copyright (c) IPython Development Team.
5 # Distributed under the terms of the Modified BSD License.
5 # Distributed under the terms of the Modified BSD License.
6
6
7 import os
7 import os
8 import sys
8 import sys
9 import unittest
9 import unittest
10
10
11 from contextlib import contextmanager
11 from contextlib import contextmanager
12
12
13 import nose.tools as nt
13 import nose.tools as nt
14
14
15 from traitlets.config.loader import Config
15 from traitlets.config.loader import Config
16 from IPython import get_ipython
16 from IPython import get_ipython
17 from IPython.core import completer
17 from IPython.core import completer
18 from IPython.external.decorators import knownfailureif
18 from IPython.external.decorators import knownfailureif
19 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
19 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
20 from IPython.utils.generics import complete_object
20 from IPython.utils.generics import complete_object
21 from IPython.testing import decorators as dec
21 from IPython.testing import decorators as dec
22
22
23 from IPython.core.completer import Completion, provisionalcompleter
23 from IPython.core.completer import Completion, provisionalcompleter, match_dict_keys
24 from nose.tools import assert_in, assert_not_in
24 from nose.tools import assert_in, assert_not_in
25
25
26 #-----------------------------------------------------------------------------
26 #-----------------------------------------------------------------------------
27 # Test functions
27 # Test functions
28 #-----------------------------------------------------------------------------
28 #-----------------------------------------------------------------------------
29
29
30 @contextmanager
30 @contextmanager
31 def greedy_completion():
31 def greedy_completion():
32 ip = get_ipython()
32 ip = get_ipython()
33 greedy_original = ip.Completer.greedy
33 greedy_original = ip.Completer.greedy
34 try:
34 try:
35 ip.Completer.greedy = True
35 ip.Completer.greedy = True
36 yield
36 yield
37 finally:
37 finally:
38 ip.Completer.greedy = greedy_original
38 ip.Completer.greedy = greedy_original
39
39
40 def test_protect_filename():
40 def test_protect_filename():
41 if sys.platform == 'win32':
41 if sys.platform == 'win32':
42 pairs = [('abc','abc'),
42 pairs = [('abc','abc'),
43 (' abc','" abc"'),
43 (' abc','" abc"'),
44 ('a bc','"a bc"'),
44 ('a bc','"a bc"'),
45 ('a bc','"a bc"'),
45 ('a bc','"a bc"'),
46 (' bc','" bc"'),
46 (' bc','" bc"'),
47 ]
47 ]
48 else:
48 else:
49 pairs = [('abc','abc'),
49 pairs = [('abc','abc'),
50 (' abc',r'\ abc'),
50 (' abc',r'\ abc'),
51 ('a bc',r'a\ bc'),
51 ('a bc',r'a\ bc'),
52 ('a bc',r'a\ \ bc'),
52 ('a bc',r'a\ \ bc'),
53 (' bc',r'\ \ bc'),
53 (' bc',r'\ \ bc'),
54 # On posix, we also protect parens and other special characters.
54 # On posix, we also protect parens and other special characters.
55 ('a(bc',r'a\(bc'),
55 ('a(bc',r'a\(bc'),
56 ('a)bc',r'a\)bc'),
56 ('a)bc',r'a\)bc'),
57 ('a( )bc',r'a\(\ \)bc'),
57 ('a( )bc',r'a\(\ \)bc'),
58 ('a[1]bc', r'a\[1\]bc'),
58 ('a[1]bc', r'a\[1\]bc'),
59 ('a{1}bc', r'a\{1\}bc'),
59 ('a{1}bc', r'a\{1\}bc'),
60 ('a#bc', r'a\#bc'),
60 ('a#bc', r'a\#bc'),
61 ('a?bc', r'a\?bc'),
61 ('a?bc', r'a\?bc'),
62 ('a=bc', r'a\=bc'),
62 ('a=bc', r'a\=bc'),
63 ('a\\bc', r'a\\bc'),
63 ('a\\bc', r'a\\bc'),
64 ('a|bc', r'a\|bc'),
64 ('a|bc', r'a\|bc'),
65 ('a;bc', r'a\;bc'),
65 ('a;bc', r'a\;bc'),
66 ('a:bc', r'a\:bc'),
66 ('a:bc', r'a\:bc'),
67 ("a'bc", r"a\'bc"),
67 ("a'bc", r"a\'bc"),
68 ('a*bc', r'a\*bc'),
68 ('a*bc', r'a\*bc'),
69 ('a"bc', r'a\"bc'),
69 ('a"bc', r'a\"bc'),
70 ('a^bc', r'a\^bc'),
70 ('a^bc', r'a\^bc'),
71 ('a&bc', r'a\&bc'),
71 ('a&bc', r'a\&bc'),
72 ]
72 ]
73 # run the actual tests
73 # run the actual tests
74 for s1, s2 in pairs:
74 for s1, s2 in pairs:
75 s1p = completer.protect_filename(s1)
75 s1p = completer.protect_filename(s1)
76 nt.assert_equal(s1p, s2)
76 nt.assert_equal(s1p, s2)
77
77
78
78
79 def check_line_split(splitter, test_specs):
79 def check_line_split(splitter, test_specs):
80 for part1, part2, split in test_specs:
80 for part1, part2, split in test_specs:
81 cursor_pos = len(part1)
81 cursor_pos = len(part1)
82 line = part1+part2
82 line = part1+part2
83 out = splitter.split_line(line, cursor_pos)
83 out = splitter.split_line(line, cursor_pos)
84 nt.assert_equal(out, split)
84 nt.assert_equal(out, split)
85
85
86
86
87 def test_line_split():
87 def test_line_split():
88 """Basic line splitter test with default specs."""
88 """Basic line splitter test with default specs."""
89 sp = completer.CompletionSplitter()
89 sp = completer.CompletionSplitter()
90 # The format of the test specs is: part1, part2, expected answer. Parts 1
90 # The format of the test specs is: part1, part2, expected answer. Parts 1
91 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
91 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
92 # was at the end of part1. So an empty part2 represents someone hitting
92 # was at the end of part1. So an empty part2 represents someone hitting
93 # tab at the end of the line, the most common case.
93 # tab at the end of the line, the most common case.
94 t = [('run some/scrip', '', 'some/scrip'),
94 t = [('run some/scrip', '', 'some/scrip'),
95 ('run scripts/er', 'ror.py foo', 'scripts/er'),
95 ('run scripts/er', 'ror.py foo', 'scripts/er'),
96 ('echo $HOM', '', 'HOM'),
96 ('echo $HOM', '', 'HOM'),
97 ('print sys.pa', '', 'sys.pa'),
97 ('print sys.pa', '', 'sys.pa'),
98 ('print(sys.pa', '', 'sys.pa'),
98 ('print(sys.pa', '', 'sys.pa'),
99 ("execfile('scripts/er", '', 'scripts/er'),
99 ("execfile('scripts/er", '', 'scripts/er'),
100 ('a[x.', '', 'x.'),
100 ('a[x.', '', 'x.'),
101 ('a[x.', 'y', 'x.'),
101 ('a[x.', 'y', 'x.'),
102 ('cd "some_file/', '', 'some_file/'),
102 ('cd "some_file/', '', 'some_file/'),
103 ]
103 ]
104 check_line_split(sp, t)
104 check_line_split(sp, t)
105 # Ensure splitting works OK with unicode by re-running the tests with
105 # Ensure splitting works OK with unicode by re-running the tests with
106 # all inputs turned into unicode
106 # all inputs turned into unicode
107 check_line_split(sp, [ map(str, p) for p in t] )
107 check_line_split(sp, [ map(str, p) for p in t] )
108
108
109
109
110 def test_custom_completion_error():
110 def test_custom_completion_error():
111 """Test that errors from custom attribute completers are silenced."""
111 """Test that errors from custom attribute completers are silenced."""
112 ip = get_ipython()
112 ip = get_ipython()
113 class A(object): pass
113 class A(object): pass
114 ip.user_ns['a'] = A()
114 ip.user_ns['a'] = A()
115
115
116 @complete_object.when_type(A)
116 @complete_object.when_type(A)
117 def complete_A(a, existing_completions):
117 def complete_A(a, existing_completions):
118 raise TypeError("this should be silenced")
118 raise TypeError("this should be silenced")
119
119
120 ip.complete("a.")
120 ip.complete("a.")
121
121
122
122
123 def test_unicode_completions():
123 def test_unicode_completions():
124 ip = get_ipython()
124 ip = get_ipython()
125 # Some strings that trigger different types of completion. Check them both
125 # Some strings that trigger different types of completion. Check them both
126 # in str and unicode forms
126 # in str and unicode forms
127 s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
127 s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
128 for t in s + list(map(str, s)):
128 for t in s + list(map(str, s)):
129 # We don't need to check exact completion values (they may change
129 # We don't need to check exact completion values (they may change
130 # depending on the state of the namespace), but at least no exceptions
130 # depending on the state of the namespace), but at least no exceptions
131 # should be thrown and the return value should be a pair of text, list
131 # should be thrown and the return value should be a pair of text, list
132 # values.
132 # values.
133 text, matches = ip.complete(t)
133 text, matches = ip.complete(t)
134 nt.assert_true(isinstance(text, str))
134 nt.assert_true(isinstance(text, str))
135 nt.assert_true(isinstance(matches, list))
135 nt.assert_true(isinstance(matches, list))
136
136
137 def test_latex_completions():
137 def test_latex_completions():
138 from IPython.core.latex_symbols import latex_symbols
138 from IPython.core.latex_symbols import latex_symbols
139 import random
139 import random
140 ip = get_ipython()
140 ip = get_ipython()
141 # Test some random unicode symbols
141 # Test some random unicode symbols
142 keys = random.sample(latex_symbols.keys(), 10)
142 keys = random.sample(latex_symbols.keys(), 10)
143 for k in keys:
143 for k in keys:
144 text, matches = ip.complete(k)
144 text, matches = ip.complete(k)
145 nt.assert_equal(len(matches),1)
145 nt.assert_equal(len(matches),1)
146 nt.assert_equal(text, k)
146 nt.assert_equal(text, k)
147 nt.assert_equal(matches[0], latex_symbols[k])
147 nt.assert_equal(matches[0], latex_symbols[k])
148 # Test a more complex line
148 # Test a more complex line
149 text, matches = ip.complete(u'print(\\alpha')
149 text, matches = ip.complete(u'print(\\alpha')
150 nt.assert_equal(text, u'\\alpha')
150 nt.assert_equal(text, u'\\alpha')
151 nt.assert_equal(matches[0], latex_symbols['\\alpha'])
151 nt.assert_equal(matches[0], latex_symbols['\\alpha'])
152 # Test multiple matching latex symbols
152 # Test multiple matching latex symbols
153 text, matches = ip.complete(u'\\al')
153 text, matches = ip.complete(u'\\al')
154 nt.assert_in('\\alpha', matches)
154 nt.assert_in('\\alpha', matches)
155 nt.assert_in('\\aleph', matches)
155 nt.assert_in('\\aleph', matches)
156
156
157
157
158
158
159
159
160 def test_back_latex_completion():
160 def test_back_latex_completion():
161 ip = get_ipython()
161 ip = get_ipython()
162
162
163 # do not return more than one match for \beta, only the latex one.
163 # do not return more than one match for \beta, only the latex one.
164 name, matches = ip.complete('\\Ξ²')
164 name, matches = ip.complete('\\Ξ²')
165 nt.assert_equal(len(matches), 1)
165 nt.assert_equal(len(matches), 1)
166 nt.assert_equal(matches[0], '\\beta')
166 nt.assert_equal(matches[0], '\\beta')
167
167
168 def test_back_unicode_completion():
168 def test_back_unicode_completion():
169 ip = get_ipython()
169 ip = get_ipython()
170
170
171 name, matches = ip.complete('\\β…€')
171 name, matches = ip.complete('\\β…€')
172 nt.assert_equal(len(matches), 1)
172 nt.assert_equal(len(matches), 1)
173 nt.assert_equal(matches[0], '\\ROMAN NUMERAL FIVE')
173 nt.assert_equal(matches[0], '\\ROMAN NUMERAL FIVE')
174
174
175
175
176 def test_forward_unicode_completion():
176 def test_forward_unicode_completion():
177 ip = get_ipython()
177 ip = get_ipython()
178
178
179 name, matches = ip.complete('\\ROMAN NUMERAL FIVE')
179 name, matches = ip.complete('\\ROMAN NUMERAL FIVE')
180 nt.assert_equal(len(matches), 1)
180 nt.assert_equal(len(matches), 1)
181 nt.assert_equal(matches[0], 'β…€')
181 nt.assert_equal(matches[0], 'β…€')
182
182
183 @dec.knownfailureif(sys.platform == 'win32', 'Fails if there is a C:\\j... path')
183 @dec.knownfailureif(sys.platform == 'win32', 'Fails if there is a C:\\j... path')
184 def test_no_ascii_back_completion():
184 def test_no_ascii_back_completion():
185 ip = get_ipython()
185 ip = get_ipython()
186 with TemporaryWorkingDirectory(): # Avoid any filename completions
186 with TemporaryWorkingDirectory(): # Avoid any filename completions
187 # single ascii letters that don't yet have completions
187 # single ascii letters that don't yet have completions
188 for letter in 'jJ' :
188 for letter in 'jJ' :
189 name, matches = ip.complete('\\'+letter)
189 name, matches = ip.complete('\\'+letter)
190 nt.assert_equal(matches, [])
190 nt.assert_equal(matches, [])
191
191
192
192
193
193
194
194
195 class CompletionSplitterTestCase(unittest.TestCase):
195 class CompletionSplitterTestCase(unittest.TestCase):
196 def setUp(self):
196 def setUp(self):
197 self.sp = completer.CompletionSplitter()
197 self.sp = completer.CompletionSplitter()
198
198
199 def test_delim_setting(self):
199 def test_delim_setting(self):
200 self.sp.delims = ' '
200 self.sp.delims = ' '
201 nt.assert_equal(self.sp.delims, ' ')
201 nt.assert_equal(self.sp.delims, ' ')
202 nt.assert_equal(self.sp._delim_expr, '[\ ]')
202 nt.assert_equal(self.sp._delim_expr, '[\ ]')
203
203
204 def test_spaces(self):
204 def test_spaces(self):
205 """Test with only spaces as split chars."""
205 """Test with only spaces as split chars."""
206 self.sp.delims = ' '
206 self.sp.delims = ' '
207 t = [('foo', '', 'foo'),
207 t = [('foo', '', 'foo'),
208 ('run foo', '', 'foo'),
208 ('run foo', '', 'foo'),
209 ('run foo', 'bar', 'foo'),
209 ('run foo', 'bar', 'foo'),
210 ]
210 ]
211 check_line_split(self.sp, t)
211 check_line_split(self.sp, t)
212
212
213
213
214 def test_has_open_quotes1():
214 def test_has_open_quotes1():
215 for s in ["'", "'''", "'hi' '"]:
215 for s in ["'", "'''", "'hi' '"]:
216 nt.assert_equal(completer.has_open_quotes(s), "'")
216 nt.assert_equal(completer.has_open_quotes(s), "'")
217
217
218
218
219 def test_has_open_quotes2():
219 def test_has_open_quotes2():
220 for s in ['"', '"""', '"hi" "']:
220 for s in ['"', '"""', '"hi" "']:
221 nt.assert_equal(completer.has_open_quotes(s), '"')
221 nt.assert_equal(completer.has_open_quotes(s), '"')
222
222
223
223
224 def test_has_open_quotes3():
224 def test_has_open_quotes3():
225 for s in ["''", "''' '''", "'hi' 'ipython'"]:
225 for s in ["''", "''' '''", "'hi' 'ipython'"]:
226 nt.assert_false(completer.has_open_quotes(s))
226 nt.assert_false(completer.has_open_quotes(s))
227
227
228
228
229 def test_has_open_quotes4():
229 def test_has_open_quotes4():
230 for s in ['""', '""" """', '"hi" "ipython"']:
230 for s in ['""', '""" """', '"hi" "ipython"']:
231 nt.assert_false(completer.has_open_quotes(s))
231 nt.assert_false(completer.has_open_quotes(s))
232
232
233
233
234 @knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
234 @knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
235 def test_abspath_file_completions():
235 def test_abspath_file_completions():
236 ip = get_ipython()
236 ip = get_ipython()
237 with TemporaryDirectory() as tmpdir:
237 with TemporaryDirectory() as tmpdir:
238 prefix = os.path.join(tmpdir, 'foo')
238 prefix = os.path.join(tmpdir, 'foo')
239 suffixes = ['1', '2']
239 suffixes = ['1', '2']
240 names = [prefix+s for s in suffixes]
240 names = [prefix+s for s in suffixes]
241 for n in names:
241 for n in names:
242 open(n, 'w').close()
242 open(n, 'w').close()
243
243
244 # Check simple completion
244 # Check simple completion
245 c = ip.complete(prefix)[1]
245 c = ip.complete(prefix)[1]
246 nt.assert_equal(c, names)
246 nt.assert_equal(c, names)
247
247
248 # Now check with a function call
248 # Now check with a function call
249 cmd = 'a = f("%s' % prefix
249 cmd = 'a = f("%s' % prefix
250 c = ip.complete(prefix, cmd)[1]
250 c = ip.complete(prefix, cmd)[1]
251 comp = [prefix+s for s in suffixes]
251 comp = [prefix+s for s in suffixes]
252 nt.assert_equal(c, comp)
252 nt.assert_equal(c, comp)
253
253
254
254
255 def test_local_file_completions():
255 def test_local_file_completions():
256 ip = get_ipython()
256 ip = get_ipython()
257 with TemporaryWorkingDirectory():
257 with TemporaryWorkingDirectory():
258 prefix = './foo'
258 prefix = './foo'
259 suffixes = ['1', '2']
259 suffixes = ['1', '2']
260 names = [prefix+s for s in suffixes]
260 names = [prefix+s for s in suffixes]
261 for n in names:
261 for n in names:
262 open(n, 'w').close()
262 open(n, 'w').close()
263
263
264 # Check simple completion
264 # Check simple completion
265 c = ip.complete(prefix)[1]
265 c = ip.complete(prefix)[1]
266 nt.assert_equal(c, names)
266 nt.assert_equal(c, names)
267
267
268 # Now check with a function call
268 # Now check with a function call
269 cmd = 'a = f("%s' % prefix
269 cmd = 'a = f("%s' % prefix
270 c = ip.complete(prefix, cmd)[1]
270 c = ip.complete(prefix, cmd)[1]
271 comp = set(prefix+s for s in suffixes)
271 comp = set(prefix+s for s in suffixes)
272 nt.assert_true(comp.issubset(set(c)))
272 nt.assert_true(comp.issubset(set(c)))
273
273
274
274
275 def test_jedi():
275 def test_jedi():
276 """
276 """
277 A couple of issues we had with Jedi
277 A couple of issues we had with Jedi
278 """
278 """
279 ip = get_ipython()
279 ip = get_ipython()
280
280
281 def _test_complete(reason, s, comp, start=None, end=None):
281 def _test_complete(reason, s, comp, start=None, end=None):
282 l = len(s)
282 l = len(s)
283 start = start if start is not None else l
283 start = start if start is not None else l
284 end = end if end is not None else l
284 end = end if end is not None else l
285 with provisionalcompleter():
285 with provisionalcompleter():
286 completions = set(ip.Completer.completions(s, l))
286 completions = set(ip.Completer.completions(s, l))
287 assert_in(Completion(start, end, comp), completions, reason)
287 assert_in(Completion(start, end, comp), completions, reason)
288
288
289 def _test_not_complete(reason, s, comp):
289 def _test_not_complete(reason, s, comp):
290 l = len(s)
290 l = len(s)
291 with provisionalcompleter():
291 with provisionalcompleter():
292 completions = set(ip.Completer.completions(s, l))
292 completions = set(ip.Completer.completions(s, l))
293 assert_not_in(Completion(l, l, comp), completions, reason)
293 assert_not_in(Completion(l, l, comp), completions, reason)
294
294
295 import jedi
295 import jedi
296 jedi_version = tuple(int(i) for i in jedi.__version__.split('.')[:3])
296 jedi_version = tuple(int(i) for i in jedi.__version__.split('.')[:3])
297 if jedi_version > (0,10):
297 if jedi_version > (0,10):
298 yield _test_complete, 'jedi >0.9 should complete and not crash', 'a=1;a.', 'real'
298 yield _test_complete, 'jedi >0.9 should complete and not crash', 'a=1;a.', 'real'
299 yield _test_complete, 'can infer first argument', 'a=(1,"foo");a[0].', 'real'
299 yield _test_complete, 'can infer first argument', 'a=(1,"foo");a[0].', 'real'
300 yield _test_complete, 'can infer second argument', 'a=(1,"foo");a[1].', 'capitalize'
300 yield _test_complete, 'can infer second argument', 'a=(1,"foo");a[1].', 'capitalize'
301 yield _test_complete, 'cover duplicate completions', 'im', 'import', 0, 2
301 yield _test_complete, 'cover duplicate completions', 'im', 'import', 0, 2
302
302
303 yield _test_not_complete, 'does not mix types', 'a=(1,"foo");a[0].', 'capitalize'
303 yield _test_not_complete, 'does not mix types', 'a=(1,"foo");a[0].', 'capitalize'
304
304
305
305
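
The _test_complete helper just above also documents the shape of the experimental API: IPCompleter.completions must be consumed inside a provisionalcompleter() context (it raises otherwise) and yields Completion objects with start, end and text. Below is a hedged, self-contained sketch of the same call pattern; it assumes IPython (with its jedi dependency) is installed, and InteractiveShell.instance() is used only to obtain a shell outside the test suite.

from IPython.core.interactiveshell import InteractiveShell
from IPython.core.completer import provisionalcompleter

ip = InteractiveShell.instance()            # throw-away shell for illustration
code = 'a=(1,"foo");a[0].'
with provisionalcompleter():                # the provisional API raises outside this context
    comps = list(ip.Completer.completions(code, len(code)))
print(sorted(c.text for c in comps)[:5])    # should include 'real', as asserted above
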
306 def test_greedy_completions():
306 def test_greedy_completions():
307 """
307 """
308 Test the capability of the Greedy completer.
308 Test the capability of the Greedy completer.
309
309
310 Most of the tests here do not really show off the greedy completer; as proof,
310 Most of the tests here do not really show off the greedy completer; as proof,
311 each of the tests below now passes with Jedi. The greedy completer is capable of more.
311 each of the tests below now passes with Jedi. The greedy completer is capable of more.
312
312
313 See the :any:`test_dict_key_completion_contexts`
313 See the :any:`test_dict_key_completion_contexts`
314
314
315 """
315 """
316 ip = get_ipython()
316 ip = get_ipython()
317 ip.ex('a=list(range(5))')
317 ip.ex('a=list(range(5))')
318 _,c = ip.complete('.',line='a[0].')
318 _,c = ip.complete('.',line='a[0].')
319 nt.assert_false('.real' in c,
319 nt.assert_false('.real' in c,
320 "Shouldn't have completed on a[0]: %s"%c)
320 "Shouldn't have completed on a[0]: %s"%c)
321 with greedy_completion(), provisionalcompleter():
321 with greedy_completion(), provisionalcompleter():
322 def _(line, cursor_pos, expect, message, completion):
322 def _(line, cursor_pos, expect, message, completion):
323 _,c = ip.complete('.', line=line, cursor_pos=cursor_pos)
323 _,c = ip.complete('.', line=line, cursor_pos=cursor_pos)
324 with provisionalcompleter():
324 with provisionalcompleter():
325 completions = ip.Completer.completions(line, cursor_pos)
325 completions = ip.Completer.completions(line, cursor_pos)
326 nt.assert_in(expect, c, message%c)
326 nt.assert_in(expect, c, message%c)
327 nt.assert_in(completion, completions)
327 nt.assert_in(completion, completions)
328
328
329 yield _, 'a[0].', 5, 'a[0].real', "Should have completed on a[0].: %s", Completion(5,5, 'real')
329 yield _, 'a[0].', 5, 'a[0].real', "Should have completed on a[0].: %s", Completion(5,5, 'real')
330 yield _, 'a[0].r', 6, 'a[0].real', "Should have completed on a[0].r: %s", Completion(5,6, 'real')
330 yield _, 'a[0].r', 6, 'a[0].real', "Should have completed on a[0].r: %s", Completion(5,6, 'real')
331
331
332 if sys.version_info > (3, 4):
332 if sys.version_info > (3, 4):
333 yield _, 'a[0].from_', 10, 'a[0].from_bytes', "Should have completed on a[0].from_: %s", Completion(5, 10, 'from_bytes')
333 yield _, 'a[0].from_', 10, 'a[0].from_bytes', "Should have completed on a[0].from_: %s", Completion(5, 10, 'from_bytes')
334
334
335
335
336 def test_omit__names():
336 def test_omit__names():
337 # also happens to test IPCompleter as a configurable
337 # also happens to test IPCompleter as a configurable
338 ip = get_ipython()
338 ip = get_ipython()
339 ip._hidden_attr = 1
339 ip._hidden_attr = 1
340 ip._x = {}
340 ip._x = {}
341 c = ip.Completer
341 c = ip.Completer
342 ip.ex('ip=get_ipython()')
342 ip.ex('ip=get_ipython()')
343 cfg = Config()
343 cfg = Config()
344 cfg.IPCompleter.omit__names = 0
344 cfg.IPCompleter.omit__names = 0
345 c.update_config(cfg)
345 c.update_config(cfg)
346 with provisionalcompleter():
346 with provisionalcompleter():
347 s,matches = c.complete('ip.')
347 s,matches = c.complete('ip.')
348 completions = set(c.completions('ip.', 3))
348 completions = set(c.completions('ip.', 3))
349
349
350 nt.assert_in('ip.__str__', matches)
350 nt.assert_in('ip.__str__', matches)
351 nt.assert_in(Completion(3, 3, '__str__'), completions)
351 nt.assert_in(Completion(3, 3, '__str__'), completions)
352
352
353 nt.assert_in('ip._hidden_attr', matches)
353 nt.assert_in('ip._hidden_attr', matches)
354 nt.assert_in(Completion(3,3, "_hidden_attr"), completions)
354 nt.assert_in(Completion(3,3, "_hidden_attr"), completions)
355
355
356
356
357 cfg = Config()
357 cfg = Config()
358 cfg.IPCompleter.omit__names = 1
358 cfg.IPCompleter.omit__names = 1
359 c.update_config(cfg)
359 c.update_config(cfg)
360 with provisionalcompleter():
360 with provisionalcompleter():
361 s,matches = c.complete('ip.')
361 s,matches = c.complete('ip.')
362 completions = set(c.completions('ip.', 3))
362 completions = set(c.completions('ip.', 3))
363
363
364 nt.assert_not_in('ip.__str__', matches)
364 nt.assert_not_in('ip.__str__', matches)
365 nt.assert_not_in(Completion(3,3,'__str__'), completions)
365 nt.assert_not_in(Completion(3,3,'__str__'), completions)
366
366
367 # nt.assert_in('ip._hidden_attr', matches)
367 # nt.assert_in('ip._hidden_attr', matches)
368 nt.assert_in(Completion(3,3, "_hidden_attr"), completions)
368 nt.assert_in(Completion(3,3, "_hidden_attr"), completions)
369
369
370 cfg = Config()
370 cfg = Config()
371 cfg.IPCompleter.omit__names = 2
371 cfg.IPCompleter.omit__names = 2
372 c.update_config(cfg)
372 c.update_config(cfg)
373 with provisionalcompleter():
373 with provisionalcompleter():
374 s,matches = c.complete('ip.')
374 s,matches = c.complete('ip.')
375 completions = set(c.completions('ip.', 3))
375 completions = set(c.completions('ip.', 3))
376
376
377 nt.assert_not_in('ip.__str__', matches)
377 nt.assert_not_in('ip.__str__', matches)
378 nt.assert_not_in(Completion(3,3,'__str__'), completions)
378 nt.assert_not_in(Completion(3,3,'__str__'), completions)
379
379
380 nt.assert_not_in('ip._hidden_attr', matches)
380 nt.assert_not_in('ip._hidden_attr', matches)
381 nt.assert_not_in(Completion(3,3, "_hidden_attr"), completions)
381 nt.assert_not_in(Completion(3,3, "_hidden_attr"), completions)
382
382
383 with provisionalcompleter():
383 with provisionalcompleter():
384 s,matches = c.complete('ip._x.')
384 s,matches = c.complete('ip._x.')
385 completions = set(c.completions('ip._x.', 6))
385 completions = set(c.completions('ip._x.', 6))
386
386
387 nt.assert_in('ip._x.keys', matches)
387 nt.assert_in('ip._x.keys', matches)
388 nt.assert_in(Completion(6,6, "keys"), completions)
388 nt.assert_in(Completion(6,6, "keys"), completions)
389
389
390 del ip._hidden_attr
390 del ip._hidden_attr
391 del ip._x
391 del ip._x
392
392
393
393
394 def test_limit_to__all__False_ok():
394 def test_limit_to__all__False_ok():
395 """
395 """
396 Limit to __all__ is deprecated; once we remove it, this test can go away.
396 Limit to __all__ is deprecated; once we remove it, this test can go away.
397 """
397 """
398 ip = get_ipython()
398 ip = get_ipython()
399 c = ip.Completer
399 c = ip.Completer
400 ip.ex('class D: x=24')
400 ip.ex('class D: x=24')
401 ip.ex('d=D()')
401 ip.ex('d=D()')
402 cfg = Config()
402 cfg = Config()
403 cfg.IPCompleter.limit_to__all__ = False
403 cfg.IPCompleter.limit_to__all__ = False
404 c.update_config(cfg)
404 c.update_config(cfg)
405 s, matches = c.complete('d.')
405 s, matches = c.complete('d.')
406 nt.assert_in('d.x', matches)
406 nt.assert_in('d.x', matches)
407
407
408
408
409 def test_get__all__entries_ok():
409 def test_get__all__entries_ok():
410 class A(object):
410 class A(object):
411 __all__ = ['x', 1]
411 __all__ = ['x', 1]
412 words = completer.get__all__entries(A())
412 words = completer.get__all__entries(A())
413 nt.assert_equal(words, ['x'])
413 nt.assert_equal(words, ['x'])
414
414
415
415
416 def test_get__all__entries_no__all__ok():
416 def test_get__all__entries_no__all__ok():
417 class A(object):
417 class A(object):
418 pass
418 pass
419 words = completer.get__all__entries(A())
419 words = completer.get__all__entries(A())
420 nt.assert_equal(words, [])
420 nt.assert_equal(words, [])
421
421
422
422
423 def test_func_kw_completions():
423 def test_func_kw_completions():
424 ip = get_ipython()
424 ip = get_ipython()
425 c = ip.Completer
425 c = ip.Completer
426 ip.ex('def myfunc(a=1,b=2): return a+b')
426 ip.ex('def myfunc(a=1,b=2): return a+b')
427 s, matches = c.complete(None, 'myfunc(1,b')
427 s, matches = c.complete(None, 'myfunc(1,b')
428 nt.assert_in('b=', matches)
428 nt.assert_in('b=', matches)
429 # Simulate completing with cursor right after b (pos==10):
429 # Simulate completing with cursor right after b (pos==10):
430 s, matches = c.complete(None, 'myfunc(1,b)', 10)
430 s, matches = c.complete(None, 'myfunc(1,b)', 10)
431 nt.assert_in('b=', matches)
431 nt.assert_in('b=', matches)
432 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
432 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
433 nt.assert_in('b=', matches)
433 nt.assert_in('b=', matches)
434 #builtin function
434 #builtin function
435 s, matches = c.complete(None, 'min(k, k')
435 s, matches = c.complete(None, 'min(k, k')
436 nt.assert_in('key=', matches)
436 nt.assert_in('key=', matches)
437
437
438
438
439 def test_default_arguments_from_docstring():
439 def test_default_arguments_from_docstring():
440 ip = get_ipython()
440 ip = get_ipython()
441 c = ip.Completer
441 c = ip.Completer
442 kwd = c._default_arguments_from_docstring(
442 kwd = c._default_arguments_from_docstring(
443 'min(iterable[, key=func]) -> value')
443 'min(iterable[, key=func]) -> value')
444 nt.assert_equal(kwd, ['key'])
444 nt.assert_equal(kwd, ['key'])
445 #with cython type etc
445 #with cython type etc
446 kwd = c._default_arguments_from_docstring(
446 kwd = c._default_arguments_from_docstring(
447 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
447 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
448 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
448 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
449 #white spaces
449 #white spaces
450 kwd = c._default_arguments_from_docstring(
450 kwd = c._default_arguments_from_docstring(
451 '\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
451 '\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
452 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
452 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
453
453
454 def test_line_magics():
454 def test_line_magics():
455 ip = get_ipython()
455 ip = get_ipython()
456 c = ip.Completer
456 c = ip.Completer
457 s, matches = c.complete(None, 'lsmag')
457 s, matches = c.complete(None, 'lsmag')
458 nt.assert_in('%lsmagic', matches)
458 nt.assert_in('%lsmagic', matches)
459 s, matches = c.complete(None, '%lsmag')
459 s, matches = c.complete(None, '%lsmag')
460 nt.assert_in('%lsmagic', matches)
460 nt.assert_in('%lsmagic', matches)
461
461
462
462
463 def test_cell_magics():
463 def test_cell_magics():
464 from IPython.core.magic import register_cell_magic
464 from IPython.core.magic import register_cell_magic
465
465
466 @register_cell_magic
466 @register_cell_magic
467 def _foo_cellm(line, cell):
467 def _foo_cellm(line, cell):
468 pass
468 pass
469
469
470 ip = get_ipython()
470 ip = get_ipython()
471 c = ip.Completer
471 c = ip.Completer
472
472
473 s, matches = c.complete(None, '_foo_ce')
473 s, matches = c.complete(None, '_foo_ce')
474 nt.assert_in('%%_foo_cellm', matches)
474 nt.assert_in('%%_foo_cellm', matches)
475 s, matches = c.complete(None, '%%_foo_ce')
475 s, matches = c.complete(None, '%%_foo_ce')
476 nt.assert_in('%%_foo_cellm', matches)
476 nt.assert_in('%%_foo_cellm', matches)
477
477
478
478
479 def test_line_cell_magics():
479 def test_line_cell_magics():
480 from IPython.core.magic import register_line_cell_magic
480 from IPython.core.magic import register_line_cell_magic
481
481
482 @register_line_cell_magic
482 @register_line_cell_magic
483 def _bar_cellm(line, cell):
483 def _bar_cellm(line, cell):
484 pass
484 pass
485
485
486 ip = get_ipython()
486 ip = get_ipython()
487 c = ip.Completer
487 c = ip.Completer
488
488
489 # The policy here is trickier, see comments in completion code. The
489 # The policy here is trickier, see comments in completion code. The
490 # returned values depend on whether the user passes %% or not explicitly,
490 # returned values depend on whether the user passes %% or not explicitly,
491 # and this will show a difference if the same name is both a line and cell
491 # and this will show a difference if the same name is both a line and cell
492 # magic.
492 # magic.
493 s, matches = c.complete(None, '_bar_ce')
493 s, matches = c.complete(None, '_bar_ce')
494 nt.assert_in('%_bar_cellm', matches)
494 nt.assert_in('%_bar_cellm', matches)
495 nt.assert_in('%%_bar_cellm', matches)
495 nt.assert_in('%%_bar_cellm', matches)
496 s, matches = c.complete(None, '%_bar_ce')
496 s, matches = c.complete(None, '%_bar_ce')
497 nt.assert_in('%_bar_cellm', matches)
497 nt.assert_in('%_bar_cellm', matches)
498 nt.assert_in('%%_bar_cellm', matches)
498 nt.assert_in('%%_bar_cellm', matches)
499 s, matches = c.complete(None, '%%_bar_ce')
499 s, matches = c.complete(None, '%%_bar_ce')
500 nt.assert_not_in('%_bar_cellm', matches)
500 nt.assert_not_in('%_bar_cellm', matches)
501 nt.assert_in('%%_bar_cellm', matches)
501 nt.assert_in('%%_bar_cellm', matches)
502
502
503
503
504 def test_magic_completion_order():
504 def test_magic_completion_order():
505
505
506 ip = get_ipython()
506 ip = get_ipython()
507 c = ip.Completer
507 c = ip.Completer
508
508
509 # Test ordering of magics and non-magics with the same name
509 # Test ordering of magics and non-magics with the same name
510 # We want the non-magic first
510 # We want the non-magic first
511
511
512 # Before importing matplotlib, there should only be one option:
512 # Before importing matplotlib, there should only be one option:
513
513
514 text, matches = c.complete('mat')
514 text, matches = c.complete('mat')
515 nt.assert_equal(matches, ["%matplotlib"])
515 nt.assert_equal(matches, ["%matplotlib"])
516
516
517
517
518 ip.run_cell("matplotlib = 1") # introduce name into namespace
518 ip.run_cell("matplotlib = 1") # introduce name into namespace
519
519
520 # After the import, there should be two options, ordered like this:
520 # After the import, there should be two options, ordered like this:
521 text, matches = c.complete('mat')
521 text, matches = c.complete('mat')
522 nt.assert_equal(matches, ["matplotlib", "%matplotlib"])
522 nt.assert_equal(matches, ["matplotlib", "%matplotlib"])
523
523
524
524
525 ip.run_cell("timeit = 1") # define a user variable called 'timeit'
525 ip.run_cell("timeit = 1") # define a user variable called 'timeit'
526
526
527 # Order of user variable and line and cell magics with same name:
527 # Order of user variable and line and cell magics with same name:
528 text, matches = c.complete('timeit')
528 text, matches = c.complete('timeit')
529 nt.assert_equal(matches, ["timeit", "%timeit","%%timeit"])
529 nt.assert_equal(matches, ["timeit", "%timeit", "%%timeit"])
530
531 def test_match_dict_keys():
532 """
533 Test that match_dict_keys works on a couple of use cases, returns what is
534 expected, and does not crash
535 """
536 delims = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
537
538
539 keys = ['foo', b'far']
540 assert match_dict_keys(keys, "b'", delims=delims) == ("'", 2 ,['far'])
541 assert match_dict_keys(keys, "b'f", delims=delims) == ("'", 2 ,['far'])
542 assert match_dict_keys(keys, 'b"', delims=delims) == ('"', 2 ,['far'])
543 assert match_dict_keys(keys, 'b"f', delims=delims) == ('"', 2 ,['far'])
544
545 assert match_dict_keys(keys, "'", delims=delims) == ("'", 1 ,['foo'])
546 assert match_dict_keys(keys, "'f", delims=delims) == ("'", 1 ,['foo'])
547 assert match_dict_keys(keys, '"', delims=delims) == ('"', 1 ,['foo'])
548 assert match_dict_keys(keys, '"f', delims=delims) == ('"', 1 ,['foo'])
549
550 match_dict_keys
530
551
531
552
532 def test_dict_key_completion_string():
553 def test_dict_key_completion_string():
533 """Test dictionary key completion for string keys"""
554 """Test dictionary key completion for string keys"""
534 ip = get_ipython()
555 ip = get_ipython()
535 complete = ip.Completer.complete
556 complete = ip.Completer.complete
536
557
537 ip.user_ns['d'] = {'abc': None}
558 ip.user_ns['d'] = {'abc': None}
538
559
539 # check completion at different stages
560 # check completion at different stages
540 _, matches = complete(line_buffer="d[")
561 _, matches = complete(line_buffer="d[")
541 nt.assert_in("'abc'", matches)
562 nt.assert_in("'abc'", matches)
542 nt.assert_not_in("'abc']", matches)
563 nt.assert_not_in("'abc']", matches)
543
564
544 _, matches = complete(line_buffer="d['")
565 _, matches = complete(line_buffer="d['")
545 nt.assert_in("abc", matches)
566 nt.assert_in("abc", matches)
546 nt.assert_not_in("abc']", matches)
567 nt.assert_not_in("abc']", matches)
547
568
548 _, matches = complete(line_buffer="d['a")
569 _, matches = complete(line_buffer="d['a")
549 nt.assert_in("abc", matches)
570 nt.assert_in("abc", matches)
550 nt.assert_not_in("abc']", matches)
571 nt.assert_not_in("abc']", matches)
551
572
552 # check use of different quoting
573 # check use of different quoting
553 _, matches = complete(line_buffer="d[\"")
574 _, matches = complete(line_buffer="d[\"")
554 nt.assert_in("abc", matches)
575 nt.assert_in("abc", matches)
555 nt.assert_not_in('abc\"]', matches)
576 nt.assert_not_in('abc\"]', matches)
556
577
557 _, matches = complete(line_buffer="d[\"a")
578 _, matches = complete(line_buffer="d[\"a")
558 nt.assert_in("abc", matches)
579 nt.assert_in("abc", matches)
559 nt.assert_not_in('abc\"]', matches)
580 nt.assert_not_in('abc\"]', matches)
560
581
561 # check sensitivity to following context
582 # check sensitivity to following context
562 _, matches = complete(line_buffer="d[]", cursor_pos=2)
583 _, matches = complete(line_buffer="d[]", cursor_pos=2)
563 nt.assert_in("'abc'", matches)
584 nt.assert_in("'abc'", matches)
564
585
565 _, matches = complete(line_buffer="d['']", cursor_pos=3)
586 _, matches = complete(line_buffer="d['']", cursor_pos=3)
566 nt.assert_in("abc", matches)
587 nt.assert_in("abc", matches)
567 nt.assert_not_in("abc'", matches)
588 nt.assert_not_in("abc'", matches)
568 nt.assert_not_in("abc']", matches)
589 nt.assert_not_in("abc']", matches)
569
590
570 # check multiple solutions are correctly returned and that noise is not
591 # check multiple solutions are correctly returned and that noise is not
571 ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,
592 ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,
572 5: None}
593 5: None}
573
594
574 _, matches = complete(line_buffer="d['a")
595 _, matches = complete(line_buffer="d['a")
575 nt.assert_in("abc", matches)
596 nt.assert_in("abc", matches)
576 nt.assert_in("abd", matches)
597 nt.assert_in("abd", matches)
577 nt.assert_not_in("bad", matches)
598 nt.assert_not_in("bad", matches)
578 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
599 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
579
600
580 # check escaping and whitespace
601 # check escaping and whitespace
581 ip.user_ns['d'] = {'a\nb': None, 'a\'b': None, 'a"b': None, 'a word': None}
602 ip.user_ns['d'] = {'a\nb': None, 'a\'b': None, 'a"b': None, 'a word': None}
582 _, matches = complete(line_buffer="d['a")
603 _, matches = complete(line_buffer="d['a")
583 nt.assert_in("a\\nb", matches)
604 nt.assert_in("a\\nb", matches)
584 nt.assert_in("a\\'b", matches)
605 nt.assert_in("a\\'b", matches)
585 nt.assert_in("a\"b", matches)
606 nt.assert_in("a\"b", matches)
586 nt.assert_in("a word", matches)
607 nt.assert_in("a word", matches)
587 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
608 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
588
609
589 # - can complete on non-initial word of the string
610 # - can complete on non-initial word of the string
590 _, matches = complete(line_buffer="d['a w")
611 _, matches = complete(line_buffer="d['a w")
591 nt.assert_in("word", matches)
612 nt.assert_in("word", matches)
592
613
593 # - understands quote escaping
614 # - understands quote escaping
594 _, matches = complete(line_buffer="d['a\\'")
615 _, matches = complete(line_buffer="d['a\\'")
595 nt.assert_in("b", matches)
616 nt.assert_in("b", matches)
596
617
597 # - default quoting should work like repr
618 # - default quoting should work like repr
598 _, matches = complete(line_buffer="d[")
619 _, matches = complete(line_buffer="d[")
599 nt.assert_in("\"a'b\"", matches)
620 nt.assert_in("\"a'b\"", matches)
600
621
601 # - when opening quote with ", possible to match with unescaped apostrophe
622 # - when opening quote with ", possible to match with unescaped apostrophe
602 _, matches = complete(line_buffer="d[\"a'")
623 _, matches = complete(line_buffer="d[\"a'")
603 nt.assert_in("b", matches)
624 nt.assert_in("b", matches)
604
625
605 # need to not split at delims that readline won't split at
626 # need to not split at delims that readline won't split at
606 if '-' not in ip.Completer.splitter.delims:
627 if '-' not in ip.Completer.splitter.delims:
607 ip.user_ns['d'] = {'before-after': None}
628 ip.user_ns['d'] = {'before-after': None}
608 _, matches = complete(line_buffer="d['before-af")
629 _, matches = complete(line_buffer="d['before-af")
609 nt.assert_in('before-after', matches)
630 nt.assert_in('before-after', matches)
610
631
611 def test_dict_key_completion_contexts():
632 def test_dict_key_completion_contexts():
612 """Test expression contexts in which dict key completion occurs"""
633 """Test expression contexts in which dict key completion occurs"""
613 ip = get_ipython()
634 ip = get_ipython()
614 complete = ip.Completer.complete
635 complete = ip.Completer.complete
615 d = {'abc': None}
636 d = {'abc': None}
616 ip.user_ns['d'] = d
637 ip.user_ns['d'] = d
617
638
618 class C:
639 class C:
619 data = d
640 data = d
620 ip.user_ns['C'] = C
641 ip.user_ns['C'] = C
621 ip.user_ns['get'] = lambda: d
642 ip.user_ns['get'] = lambda: d
622
643
623 def assert_no_completion(**kwargs):
644 def assert_no_completion(**kwargs):
624 _, matches = complete(**kwargs)
645 _, matches = complete(**kwargs)
625 nt.assert_not_in('abc', matches)
646 nt.assert_not_in('abc', matches)
626 nt.assert_not_in('abc\'', matches)
647 nt.assert_not_in('abc\'', matches)
627 nt.assert_not_in('abc\']', matches)
648 nt.assert_not_in('abc\']', matches)
628 nt.assert_not_in('\'abc\'', matches)
649 nt.assert_not_in('\'abc\'', matches)
629 nt.assert_not_in('\'abc\']', matches)
650 nt.assert_not_in('\'abc\']', matches)
630
651
631 def assert_completion(**kwargs):
652 def assert_completion(**kwargs):
632 _, matches = complete(**kwargs)
653 _, matches = complete(**kwargs)
633 nt.assert_in("'abc'", matches)
654 nt.assert_in("'abc'", matches)
634 nt.assert_not_in("'abc']", matches)
655 nt.assert_not_in("'abc']", matches)
635
656
636 # no completion after string closed, even if reopened
657 # no completion after string closed, even if reopened
637 assert_no_completion(line_buffer="d['a'")
658 assert_no_completion(line_buffer="d['a'")
638 assert_no_completion(line_buffer="d[\"a\"")
659 assert_no_completion(line_buffer="d[\"a\"")
639 assert_no_completion(line_buffer="d['a' + ")
660 assert_no_completion(line_buffer="d['a' + ")
640 assert_no_completion(line_buffer="d['a' + '")
661 assert_no_completion(line_buffer="d['a' + '")
641
662
642 # completion in non-trivial expressions
663 # completion in non-trivial expressions
643 assert_completion(line_buffer="+ d[")
664 assert_completion(line_buffer="+ d[")
644 assert_completion(line_buffer="(d[")
665 assert_completion(line_buffer="(d[")
645 assert_completion(line_buffer="C.data[")
666 assert_completion(line_buffer="C.data[")
646
667
647 # greedy flag
668 # greedy flag
648 def assert_completion(**kwargs):
669 def assert_completion(**kwargs):
649 _, matches = complete(**kwargs)
670 _, matches = complete(**kwargs)
650 nt.assert_in("get()['abc']", matches)
671 nt.assert_in("get()['abc']", matches)
651
672
652 assert_no_completion(line_buffer="get()[")
673 assert_no_completion(line_buffer="get()[")
653 with greedy_completion():
674 with greedy_completion():
654 assert_completion(line_buffer="get()[")
675 assert_completion(line_buffer="get()[")
655 assert_completion(line_buffer="get()['")
676 assert_completion(line_buffer="get()['")
656 assert_completion(line_buffer="get()['a")
677 assert_completion(line_buffer="get()['a")
657 assert_completion(line_buffer="get()['ab")
678 assert_completion(line_buffer="get()['ab")
658 assert_completion(line_buffer="get()['abc")
679 assert_completion(line_buffer="get()['abc")
659
680
660
681
661
682
662 def test_dict_key_completion_bytes():
683 def test_dict_key_completion_bytes():
663 """Test handling of bytes in dict key completion"""
684 """Test handling of bytes in dict key completion"""
664 ip = get_ipython()
685 ip = get_ipython()
665 complete = ip.Completer.complete
686 complete = ip.Completer.complete
666
687
667 ip.user_ns['d'] = {'abc': None, b'abd': None}
688 ip.user_ns['d'] = {'abc': None, b'abd': None}
668
689
669 _, matches = complete(line_buffer="d[")
690 _, matches = complete(line_buffer="d[")
670 nt.assert_in("'abc'", matches)
691 nt.assert_in("'abc'", matches)
671 nt.assert_in("b'abd'", matches)
692 nt.assert_in("b'abd'", matches)
672
693
673 if False: # not currently implemented
694 if False: # not currently implemented
674 _, matches = complete(line_buffer="d[b")
695 _, matches = complete(line_buffer="d[b")
675 nt.assert_in("b'abd'", matches)
696 nt.assert_in("b'abd'", matches)
676 nt.assert_not_in("b'abc'", matches)
697 nt.assert_not_in("b'abc'", matches)
677
698
678 _, matches = complete(line_buffer="d[b'")
699 _, matches = complete(line_buffer="d[b'")
679 nt.assert_in("abd", matches)
700 nt.assert_in("abd", matches)
680 nt.assert_not_in("abc", matches)
701 nt.assert_not_in("abc", matches)
681
702
682 _, matches = complete(line_buffer="d[B'")
703 _, matches = complete(line_buffer="d[B'")
683 nt.assert_in("abd", matches)
704 nt.assert_in("abd", matches)
684 nt.assert_not_in("abc", matches)
705 nt.assert_not_in("abc", matches)
685
706
686 _, matches = complete(line_buffer="d['")
707 _, matches = complete(line_buffer="d['")
687 nt.assert_in("abc", matches)
708 nt.assert_in("abc", matches)
688 nt.assert_not_in("abd", matches)
709 nt.assert_not_in("abd", matches)
689
710
690
711
691 def test_dict_key_completion_unicode_py3():
712 def test_dict_key_completion_unicode_py3():
692 """Test handling of unicode in dict key completion"""
713 """Test handling of unicode in dict key completion"""
693 ip = get_ipython()
714 ip = get_ipython()
694 complete = ip.Completer.complete
715 complete = ip.Completer.complete
695
716
696 ip.user_ns['d'] = {u'a\u05d0': None}
717 ip.user_ns['d'] = {u'a\u05d0': None}
697
718
698 # query using escape
719 # query using escape
699 if sys.platform != 'win32':
720 if sys.platform != 'win32':
700 # Known failure on Windows
721 # Known failure on Windows
701 _, matches = complete(line_buffer="d['a\\u05d0")
722 _, matches = complete(line_buffer="d['a\\u05d0")
702 nt.assert_in("u05d0", matches) # tokenized after \\
723 nt.assert_in("u05d0", matches) # tokenized after \\
703
724
704 # query using character
725 # query using character
705 _, matches = complete(line_buffer="d['a\u05d0")
726 _, matches = complete(line_buffer="d['a\u05d0")
706 nt.assert_in(u"a\u05d0", matches)
727 nt.assert_in(u"a\u05d0", matches)
707
728
708 with greedy_completion():
729 with greedy_completion():
709 # query using escape
730 # query using escape
710 _, matches = complete(line_buffer="d['a\\u05d0")
731 _, matches = complete(line_buffer="d['a\\u05d0")
711 nt.assert_in("d['a\\u05d0']", matches) # tokenized after \\
732 nt.assert_in("d['a\\u05d0']", matches) # tokenized after \\
712
733
713 # query using character
734 # query using character
714 _, matches = complete(line_buffer="d['a\u05d0")
735 _, matches = complete(line_buffer="d['a\u05d0")
715 nt.assert_in(u"d['a\u05d0']", matches)
736 nt.assert_in(u"d['a\u05d0']", matches)
716
737
717
738
718
739
719 @dec.skip_without('numpy')
740 @dec.skip_without('numpy')
720 def test_struct_array_key_completion():
741 def test_struct_array_key_completion():
721 """Test dict key completion applies to numpy struct arrays"""
742 """Test dict key completion applies to numpy struct arrays"""
722 import numpy
743 import numpy
723 ip = get_ipython()
744 ip = get_ipython()
724 complete = ip.Completer.complete
745 complete = ip.Completer.complete
725 ip.user_ns['d'] = numpy.array([], dtype=[('hello', 'f'), ('world', 'f')])
746 ip.user_ns['d'] = numpy.array([], dtype=[('hello', 'f'), ('world', 'f')])
726 _, matches = complete(line_buffer="d['")
747 _, matches = complete(line_buffer="d['")
727 nt.assert_in("hello", matches)
748 nt.assert_in("hello", matches)
728 nt.assert_in("world", matches)
749 nt.assert_in("world", matches)
729 # complete on the numpy struct itself
750 # complete on the numpy struct itself
730 dt = numpy.dtype([('my_head', [('my_dt', '>u4'), ('my_df', '>u4')]),
751 dt = numpy.dtype([('my_head', [('my_dt', '>u4'), ('my_df', '>u4')]),
731 ('my_data', '>f4', 5)])
752 ('my_data', '>f4', 5)])
732 x = numpy.zeros(2, dtype=dt)
753 x = numpy.zeros(2, dtype=dt)
733 ip.user_ns['d'] = x[1]
754 ip.user_ns['d'] = x[1]
734 _, matches = complete(line_buffer="d['")
755 _, matches = complete(line_buffer="d['")
735 nt.assert_in("my_head", matches)
756 nt.assert_in("my_head", matches)
736 nt.assert_in("my_data", matches)
757 nt.assert_in("my_data", matches)
737 # complete on a nested level
758 # complete on a nested level
738 with greedy_completion():
759 with greedy_completion():
739 ip.user_ns['d'] = numpy.zeros(2, dtype=dt)
760 ip.user_ns['d'] = numpy.zeros(2, dtype=dt)
740 _, matches = complete(line_buffer="d[1]['my_head']['")
761 _, matches = complete(line_buffer="d[1]['my_head']['")
741 nt.assert_true(any(["my_dt" in m for m in matches]))
762 nt.assert_true(any(["my_dt" in m for m in matches]))
742 nt.assert_true(any(["my_df" in m for m in matches]))
763 nt.assert_true(any(["my_df" in m for m in matches]))
743
764
744
765
745 @dec.skip_without('pandas')
766 @dec.skip_without('pandas')
746 def test_dataframe_key_completion():
767 def test_dataframe_key_completion():
747 """Test dict key completion applies to pandas DataFrames"""
768 """Test dict key completion applies to pandas DataFrames"""
748 import pandas
769 import pandas
749 ip = get_ipython()
770 ip = get_ipython()
750 complete = ip.Completer.complete
771 complete = ip.Completer.complete
751 ip.user_ns['d'] = pandas.DataFrame({'hello': [1], 'world': [2]})
772 ip.user_ns['d'] = pandas.DataFrame({'hello': [1], 'world': [2]})
752 _, matches = complete(line_buffer="d['")
773 _, matches = complete(line_buffer="d['")
753 nt.assert_in("hello", matches)
774 nt.assert_in("hello", matches)
754 nt.assert_in("world", matches)
775 nt.assert_in("world", matches)
755
776
756
777
757 def test_dict_key_completion_invalids():
778 def test_dict_key_completion_invalids():
758 """Smoke test cases dict key completion can't handle"""
779 """Smoke test cases dict key completion can't handle"""
759 ip = get_ipython()
780 ip = get_ipython()
760 complete = ip.Completer.complete
781 complete = ip.Completer.complete
761
782
762 ip.user_ns['no_getitem'] = None
783 ip.user_ns['no_getitem'] = None
763 ip.user_ns['no_keys'] = []
784 ip.user_ns['no_keys'] = []
764 ip.user_ns['cant_call_keys'] = dict
785 ip.user_ns['cant_call_keys'] = dict
765 ip.user_ns['empty'] = {}
786 ip.user_ns['empty'] = {}
766 ip.user_ns['d'] = {'abc': 5}
787 ip.user_ns['d'] = {'abc': 5}
767
788
768 _, matches = complete(line_buffer="no_getitem['")
789 _, matches = complete(line_buffer="no_getitem['")
769 _, matches = complete(line_buffer="no_keys['")
790 _, matches = complete(line_buffer="no_keys['")
770 _, matches = complete(line_buffer="cant_call_keys['")
791 _, matches = complete(line_buffer="cant_call_keys['")
771 _, matches = complete(line_buffer="empty['")
792 _, matches = complete(line_buffer="empty['")
772 _, matches = complete(line_buffer="name_error['")
793 _, matches = complete(line_buffer="name_error['")
773 _, matches = complete(line_buffer="d['\\") # incomplete escape
794 _, matches = complete(line_buffer="d['\\") # incomplete escape
774
795
775 class KeyCompletable(object):
796 class KeyCompletable(object):
776 def __init__(self, things=()):
797 def __init__(self, things=()):
777 self.things = things
798 self.things = things
778
799
779 def _ipython_key_completions_(self):
800 def _ipython_key_completions_(self):
780 return list(self.things)
801 return list(self.things)
781
802
782 def test_object_key_completion():
803 def test_object_key_completion():
783 ip = get_ipython()
804 ip = get_ipython()
784 ip.user_ns['key_completable'] = KeyCompletable(['qwerty', 'qwick'])
805 ip.user_ns['key_completable'] = KeyCompletable(['qwerty', 'qwick'])
785
806
786 _, matches = ip.Completer.complete(line_buffer="key_completable['qw")
807 _, matches = ip.Completer.complete(line_buffer="key_completable['qw")
787 nt.assert_in('qwerty', matches)
808 nt.assert_in('qwerty', matches)
788 nt.assert_in('qwick', matches)
809 nt.assert_in('qwick', matches)
789
810
790
811
791 def test_tryimport():
812 def test_tryimport():
792 """
813 """
793 Test that try-import don't crash on trailing dot, and import modules before
793 Test that try_import doesn't crash on a trailing dot, and imports modules before
814 Test that try_import doesn't crash on a trailing dot, and imports modules before
815 """
795 from IPython.core.completerlib import try_import
816 from IPython.core.completerlib import try_import
796 assert(try_import("IPython."))
817 assert(try_import("IPython."))
797
818
798
819
799 def test_aimport_module_completer():
820 def test_aimport_module_completer():
800 ip = get_ipython()
821 ip = get_ipython()
801 _, matches = ip.complete('i', '%aimport i')
822 _, matches = ip.complete('i', '%aimport i')
802 nt.assert_in('io', matches)
823 nt.assert_in('io', matches)
803 nt.assert_not_in('int', matches)
824 nt.assert_not_in('int', matches)
804
825
805 def test_nested_import_module_completer():
826 def test_nested_import_module_completer():
806 ip = get_ipython()
827 ip = get_ipython()
807 _, matches = ip.complete(None, 'import IPython.co', 17)
828 _, matches = ip.complete(None, 'import IPython.co', 17)
808 nt.assert_in('IPython.core', matches)
829 nt.assert_in('IPython.core', matches)
809 nt.assert_not_in('import IPython.core', matches)
830 nt.assert_not_in('import IPython.core', matches)
810 nt.assert_not_in('IPython.display', matches)
831 nt.assert_not_in('IPython.display', matches)
811
832
812 def test_import_module_completer():
833 def test_import_module_completer():
813 ip = get_ipython()
834 ip = get_ipython()
814 _, matches = ip.complete('i', 'import i')
835 _, matches = ip.complete('i', 'import i')
815 nt.assert_in('io', matches)
836 nt.assert_in('io', matches)
816 nt.assert_not_in('int', matches)
837 nt.assert_not_in('int', matches)
817
838
818 def test_from_module_completer():
839 def test_from_module_completer():
819 ip = get_ipython()
840 ip = get_ipython()
820 _, matches = ip.complete('B', 'from io import B', 16)
841 _, matches = ip.complete('B', 'from io import B', 16)
821 nt.assert_in('BytesIO', matches)
842 nt.assert_in('BytesIO', matches)
822 nt.assert_not_in('BaseException', matches)
843 nt.assert_not_in('BaseException', matches)
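
To close the loop on the commit message ("Support Bytes dict completions, and test it"), the short snippet below drives match_dict_keys directly. It assumes an IPython checkout that includes this change is importable; the delimiter string and the expected result tuples are copied from test_match_dict_keys above.

from IPython.core.completer import match_dict_keys

delims = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
keys = ['foo', b'far']

# A b'...' prefix restricts matching to the bytes key...
print(match_dict_keys(keys, "b'f", delims=delims))   # -> ("'", 2, ['far'])
# ...while a plain quote matches only the str key.
print(match_dict_keys(keys, "'f", delims=delims))    # -> ("'", 1, ['foo'])
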