Merge pull request #10285 from Carreau/deduplicate-completions...
Fernando Perez -
r23359:9f622df4 merge
@@ -1,1760 +1,1796 @@
1 # encoding: utf-8
1 # encoding: utf-8
2 """Completion for IPython.
2 """Completion for IPython.
3
3
4 This module started as fork of the rlcompleter module in the Python standard
4 This module started as fork of the rlcompleter module in the Python standard
5 library. The original enhancements made to rlcompleter have been sent
5 library. The original enhancements made to rlcompleter have been sent
6 upstream and were accepted as of Python 2.3,
6 upstream and were accepted as of Python 2.3,
7
7
8 This module now supports a wide variety of completion mechanisms, both available
8 This module now supports a wide variety of completion mechanisms, both available
9 for normal classic Python code, as well as completers for IPython-specific
9 for normal classic Python code, as well as completers for IPython-specific
10 syntax like magics.
10 syntax like magics.
11
11
12 Experimental
12 Experimental
13 ============
13 ============
14
14
15 Starting with IPython 6.0, this module can make use of the Jedi library to
15 Starting with IPython 6.0, this module can make use of the Jedi library to
16 generate completions both using static analysis of the code, and dynamically
16 generate completions both using static analysis of the code, and dynamically
17 inspecting multiple namespaces. The APIs attached to this new mechanism are
17 inspecting multiple namespaces. The APIs attached to this new mechanism are
18 unstable and will raise unless used in a :any:`provisionalcompleter` context
18 unstable and will raise unless used in a :any:`provisionalcompleter` context
19 manager.
19 manager.
20
20
21 You will find that the following are experimental:
21 You will find that the following are experimental:
22
22
23 - :any:`provisionalcompleter`
23 - :any:`provisionalcompleter`
24 - :any:`IPCompleter.completions`
24 - :any:`IPCompleter.completions`
25 - :any:`Completion`
25 - :any:`Completion`
26 - :any:`rectify_completions`
26 - :any:`rectify_completions`
27
27
28 .. note::
28 .. note::
29
29
30 better name for :any:`rectify_completions` ?
30 better name for :any:`rectify_completions` ?
31
31
32 We welcome any feedback on these new APIs, and we also encourage you to try this
32 We welcome any feedback on these new APIs, and we also encourage you to try this
33 module in debug mode (start IPython with ``--Completer.debug=True``) in order
33 module in debug mode (start IPython with ``--Completer.debug=True``) in order
34 to have extra logging information if :any:`jedi` is crashing, or if current
34 to have extra logging information if :any:`jedi` is crashing, or if current
35 IPython completer pending deprecations are returning results not yet handled
35 IPython completer pending deprecations are returning results not yet handled
36 by :any:`jedi`.
36 by :any:`jedi`.
37
37
38 Using Jedi for tab completion allows snippets like the following to work without
38 Using Jedi for tab completion allows snippets like the following to work without
39 having to execute any code:
39 having to execute any code:
40
40
41 >>> myvar = ['hello', 42]
41 >>> myvar = ['hello', 42]
42 ... myvar[1].bi<tab>
42 ... myvar[1].bi<tab>
43
43
44 Tab completion will be able to infer that ``myvar[1]`` is a real number without
44 Tab completion will be able to infer that ``myvar[1]`` is a real number without
45 executing any code, unlike the previously available ``IPCompleter.greedy``
45 executing any code, unlike the previously available ``IPCompleter.greedy``
46 option.
46 option.
47
47
48 Be sure to update :any:`jedi` to the latest stable version or to try the
48 Be sure to update :any:`jedi` to the latest stable version or to try the
49 current development version to get better completions.
49 current development version to get better completions.
50 """
50 """
51
51
52 # skip module doctests
52 # skip module doctests
53 skip_doctest = True
53 skip_doctest = True
54
54
55 # Copyright (c) IPython Development Team.
55 # Copyright (c) IPython Development Team.
56 # Distributed under the terms of the Modified BSD License.
56 # Distributed under the terms of the Modified BSD License.
57 #
57 #
58 # Some of this code originated from rlcompleter in the Python standard library
58 # Some of this code originated from rlcompleter in the Python standard library
59 # Copyright (C) 2001 Python Software Foundation, www.python.org
59 # Copyright (C) 2001 Python Software Foundation, www.python.org
60
60
61
61
62 import __main__
62 import __main__
63 import builtins as builtin_mod
63 import builtins as builtin_mod
64 import glob
64 import glob
65 import time
65 import time
66 import inspect
66 import inspect
67 import itertools
67 import itertools
68 import keyword
68 import keyword
69 import os
69 import os
70 import re
70 import re
71 import sys
71 import sys
72 import unicodedata
72 import unicodedata
73 import string
73 import string
74 import warnings
74 import warnings
75
75
76 from contextlib import contextmanager
76 from contextlib import contextmanager
77 from importlib import import_module
77 from importlib import import_module
78 from typing import Iterator, List
78 from typing import Iterator, List
79 from types import SimpleNamespace
79 from types import SimpleNamespace
80
80
81 from traitlets.config.configurable import Configurable
81 from traitlets.config.configurable import Configurable
82 from IPython.core.error import TryNext
82 from IPython.core.error import TryNext
83 from IPython.core.inputsplitter import ESC_MAGIC
83 from IPython.core.inputsplitter import ESC_MAGIC
84 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
84 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
85 from IPython.utils import generics
85 from IPython.utils import generics
86 from IPython.utils.dir2 import dir2, get_real_method
86 from IPython.utils.dir2 import dir2, get_real_method
87 from IPython.utils.process import arg_split
87 from IPython.utils.process import arg_split
88 from IPython.utils.py3compat import cast_unicode_py2
88 from IPython.utils.py3compat import cast_unicode_py2
89 from traitlets import Bool, Enum, observe, Int
89 from traitlets import Bool, Enum, observe, Int
90
90
91 try:
91 try:
92 import jedi
92 import jedi
93 import jedi.api.helpers
93 import jedi.api.helpers
94 JEDI_INSTALLED = True
94 JEDI_INSTALLED = True
95 except ImportError:
95 except ImportError:
96 JEDI_INSTALLED = False
96 JEDI_INSTALLED = False
97 #-----------------------------------------------------------------------------
97 #-----------------------------------------------------------------------------
98 # Globals
98 # Globals
99 #-----------------------------------------------------------------------------
99 #-----------------------------------------------------------------------------
100
100
101 # Public API
101 # Public API
102 __all__ = ['Completer','IPCompleter']
102 __all__ = ['Completer','IPCompleter']
103
103
104 if sys.platform == 'win32':
104 if sys.platform == 'win32':
105 PROTECTABLES = ' '
105 PROTECTABLES = ' '
106 else:
106 else:
107 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
107 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
108
108
109
109
110 _deprecation_readline_sentinel = object()
110 _deprecation_readline_sentinel = object()
111
111
112
112
113 class ProvisionalCompleterWarning(FutureWarning):
113 class ProvisionalCompleterWarning(FutureWarning):
114 """
114 """
115 Exception raised by an experimental feature in this module.
115 Exception raised by an experimental feature in this module.
116
116
117 Wrap code in the :any:`provisionalcompleter` context manager if you
117 Wrap code in the :any:`provisionalcompleter` context manager if you
118 are certain you want to use an unstable feature.
118 are certain you want to use an unstable feature.
119 """
119 """
120 pass
120 pass
121
121
122 warnings.filterwarnings('error', category=ProvisionalCompleterWarning)
122 warnings.filterwarnings('error', category=ProvisionalCompleterWarning)
123
123
124 @contextmanager
124 @contextmanager
125 def provisionalcompleter(action='ignore'):
125 def provisionalcompleter(action='ignore'):
126 """
126 """
127
127
128
128
129 This context manager has to be used in any place where unstable completer
129 This context manager has to be used in any place where unstable completer
130 behavior and API may be called.
130 behavior and API may be called.
131
131
132 >>> with provisionalcompleter():
132 >>> with provisionalcompleter():
133 ... completer.do_experimental_things() # works
133 ... completer.do_experimental_things() # works
134
134
135 >>> completer.do_experimental_things() # raises.
135 >>> completer.do_experimental_things() # raises.
136
136
137 .. note:: Unstable
137 .. note:: Unstable
138
138
139 By using this context manager you agree that the APIs in use may change
139 By using this context manager you agree that the APIs in use may change
140 without warning, and that you won't complain if they do.
140 without warning, and that you won't complain if they do.
141
141
142 You also understand that if the API is not to your liking you should report
142 You also understand that if the API is not to your liking you should report
143 a bug upstream to explain your use case and help improve the API, and that you will lose
143 a bug upstream to explain your use case and help improve the API, and that you will lose
144 credibility if you complain after the API is made stable.
144 credibility if you complain after the API is made stable.
145
145
146 We'll be happy to get your feedback, feature requests and improvements on
146 We'll be happy to get your feedback, feature requests and improvements on
147 any of the unstable APIs!
147 any of the unstable APIs!
148 """
148 """
149 with warnings.catch_warnings():
149 with warnings.catch_warnings():
150 warnings.filterwarnings(action, category=ProvisionalCompleterWarning)
150 warnings.filterwarnings(action, category=ProvisionalCompleterWarning)
151 yield
151 yield
152
152
153
153
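Since the module installs an ``error`` filter for :any:`ProvisionalCompleterWarning`, touching a provisional API outside this context manager raises, while inside it the warning is ignored. A minimal sketch, assuming the module is importable as ``IPython.core.completer`` and using illustrative values:

    >>> from IPython.core.completer import provisionalcompleter, Completion
    >>> with provisionalcompleter():
    ...     c = Completion(start=0, end=3, text='foo')   # works, warning is ignored
    >>> Completion(start=0, end=3, text='foo')   # outside the block: raises ProvisionalCompleterWarning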
154 def has_open_quotes(s):
154 def has_open_quotes(s):
155 """Return whether a string has open quotes.
155 """Return whether a string has open quotes.
156
156
157 This simply counts whether the number of quote characters of either type in
157 This simply counts whether the number of quote characters of either type in
158 the string is odd.
158 the string is odd.
159
159
160 Returns
160 Returns
161 -------
161 -------
162 If there is an open quote, the quote character is returned. Else, return
162 If there is an open quote, the quote character is returned. Else, return
163 False.
163 False.
164 """
164 """
165 # We check " first, then ', so complex cases with nested quotes will get
165 # We check " first, then ', so complex cases with nested quotes will get
166 # the " to take precedence.
166 # the " to take precedence.
167 if s.count('"') % 2:
167 if s.count('"') % 2:
168 return '"'
168 return '"'
169 elif s.count("'") % 2:
169 elif s.count("'") % 2:
170 return "'"
170 return "'"
171 else:
171 else:
172 return False
172 return False
173
173
174
174
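A quick sketch of the behaviour described above, with illustrative inputs:

    >>> has_open_quotes('print("hello')
    '"'
    >>> has_open_quotes("it's")
    "'"
    >>> has_open_quotes('print("hello")')
    False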
175 def protect_filename(s):
175 def protect_filename(s):
176 """Escape a string to protect certain characters."""
176 """Escape a string to protect certain characters."""
177 if set(s) & set(PROTECTABLES):
177 if set(s) & set(PROTECTABLES):
178 if sys.platform == "win32":
178 if sys.platform == "win32":
179 return '"' + s + '"'
179 return '"' + s + '"'
180 else:
180 else:
181 return "".join(("\\" + c if c in PROTECTABLES else c) for c in s)
181 return "".join(("\\" + c if c in PROTECTABLES else c) for c in s)
182 else:
182 else:
183 return s
183 return s
184
184
185
185
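A sketch of the two platform behaviours with an illustrative filename: on POSIX each protectable character is backslash-escaped, while on win32 the whole name is wrapped in double quotes instead.

    >>> protect_filename('my file.txt')     # on a POSIX platform
    'my\\ file.txt'
    >>> # on win32 this would instead return '"my file.txt"'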
186 def expand_user(path):
186 def expand_user(path):
187 """Expand ``~``-style usernames in strings.
187 """Expand ``~``-style usernames in strings.
188
188
189 This is similar to :func:`os.path.expanduser`, but it computes and returns
189 This is similar to :func:`os.path.expanduser`, but it computes and returns
190 extra information that will be useful if the input was being used in
190 extra information that will be useful if the input was being used in
191 computing completions, and you wish to return the completions with the
191 computing completions, and you wish to return the completions with the
192 original '~' instead of its expanded value.
192 original '~' instead of its expanded value.
193
193
194 Parameters
194 Parameters
195 ----------
195 ----------
196 path : str
196 path : str
197 String to be expanded. If no ~ is present, the output is the same as the
197 String to be expanded. If no ~ is present, the output is the same as the
198 input.
198 input.
199
199
200 Returns
200 Returns
201 -------
201 -------
202 newpath : str
202 newpath : str
203 Result of ~ expansion in the input path.
203 Result of ~ expansion in the input path.
204 tilde_expand : bool
204 tilde_expand : bool
205 Whether any expansion was performed or not.
205 Whether any expansion was performed or not.
206 tilde_val : str
206 tilde_val : str
207 The value that ~ was replaced with.
207 The value that ~ was replaced with.
208 """
208 """
209 # Default values
209 # Default values
210 tilde_expand = False
210 tilde_expand = False
211 tilde_val = ''
211 tilde_val = ''
212 newpath = path
212 newpath = path
213
213
214 if path.startswith('~'):
214 if path.startswith('~'):
215 tilde_expand = True
215 tilde_expand = True
216 rest = len(path)-1
216 rest = len(path)-1
217 newpath = os.path.expanduser(path)
217 newpath = os.path.expanduser(path)
218 if rest:
218 if rest:
219 tilde_val = newpath[:-rest]
219 tilde_val = newpath[:-rest]
220 else:
220 else:
221 tilde_val = newpath
221 tilde_val = newpath
222
222
223 return newpath, tilde_expand, tilde_val
223 return newpath, tilde_expand, tilde_val
224
224
225
225
226 def compress_user(path, tilde_expand, tilde_val):
226 def compress_user(path, tilde_expand, tilde_val):
227 """Does the opposite of expand_user, with its outputs.
227 """Does the opposite of expand_user, with its outputs.
228 """
228 """
229 if tilde_expand:
229 if tilde_expand:
230 return path.replace(tilde_val, '~')
230 return path.replace(tilde_val, '~')
231 else:
231 else:
232 return path
232 return path
233
233
234
234
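Roughly how the two helpers are used together; the home directory in the comment is illustrative:

    >>> newpath, tilde_expand, tilde_val = expand_user('~/notebooks')
    >>> # e.g. newpath == '/home/me/notebooks', tilde_expand is True, tilde_val == '/home/me'
    >>> compress_user(newpath, tilde_expand, tilde_val)
    '~/notebooks'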
235 def completions_sorting_key(word):
235 def completions_sorting_key(word):
236 """key for sorting completions
236 """key for sorting completions
237
237
238 This does several things:
238 This does several things:
239
239
240 - Lowercase all completions, so they are sorted alphabetically with
240 - Lowercase all completions, so they are sorted alphabetically with
241 upper and lower case words mingled
241 upper and lower case words mingled
242 - Demote any completions starting with underscores to the end
242 - Demote any completions starting with underscores to the end
243 - Insert any %magic and %%cellmagic completions in the alphabetical order
243 - Insert any %magic and %%cellmagic completions in the alphabetical order
244 by their name
244 by their name
245 """
245 """
246 # Case insensitive sort
246 # Case insensitive sort
247 word = word.lower()
247 word = word.lower()
248
248
249 prio1, prio2 = 0, 0
249 prio1, prio2 = 0, 0
250
250
251 if word.startswith('__'):
251 if word.startswith('__'):
252 prio1 = 2
252 prio1 = 2
253 elif word.startswith('_'):
253 elif word.startswith('_'):
254 prio1 = 1
254 prio1 = 1
255
255
256 if word.endswith('='):
256 if word.endswith('='):
257 prio1 = -1
257 prio1 = -1
258
258
259 if word.startswith('%%'):
259 if word.startswith('%%'):
260 # If there's another % in there, this is something else, so leave it alone
260 # If there's another % in there, this is something else, so leave it alone
261 if not "%" in word[2:]:
261 if not "%" in word[2:]:
262 word = word[2:]
262 word = word[2:]
263 prio2 = 2
263 prio2 = 2
264 elif word.startswith('%'):
264 elif word.startswith('%'):
265 if not "%" in word[1:]:
265 if not "%" in word[1:]:
266 word = word[1:]
266 word = word[1:]
267 prio2 = 1
267 prio2 = 1
268
268
269 return prio1, word, prio2
269 return prio1, word, prio2
270
270
271
271
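For instance, sorting a mixed batch of names and magics with this key (illustrative values) keeps magics in alphabetical order by name and pushes underscore names to the end:

    >>> sorted(['_private', 'Zoo', '%%timeit', 'public'], key=completions_sorting_key)
    ['public', '%%timeit', 'Zoo', '_private']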
272 class _FakeJediCompletion:
272 class _FakeJediCompletion:
273 """
273 """
274 This is a workaround to communicate to the UI that Jedi has crashed and to
274 This is a workaround to communicate to the UI that Jedi has crashed and to
275 report a bug. Will be used only if :any:`IPCompleter.debug` is set to True.
275 report a bug. Will be used only if :any:`IPCompleter.debug` is set to True.
276
276
277 Added in IPython 6.0, so it should likely be removed for 7.0.
277 Added in IPython 6.0, so it should likely be removed for 7.0.
278
278
279 """
279 """
280
280
281 def __init__(self, name):
281 def __init__(self, name):
282
282
283 self.name = name
283 self.name = name
284 self.complete = name
284 self.complete = name
285 self.type = 'crashed'
285 self.type = 'crashed'
286 self.name_with_symbols = name
286 self.name_with_symbols = name
287
287
288 def __repr__(self):
288 def __repr__(self):
289 return '<Fake completion object jedi has crashed>'
289 return '<Fake completion object jedi has crashed>'
290
290
291
291
292 class Completion:
292 class Completion:
293 """
293 """
294 Completion object used and returned by IPython completers.
294 Completion object used and returned by IPython completers.
295
295
296 .. warning:: Unstable
296 .. warning:: Unstable
297
297
298 This class is unstable; the API may change without warning.
298 This class is unstable; the API may change without warning.
299 It will also raise unless used in the proper context manager.
299 It will also raise unless used in the proper context manager.
300
300
301 This acts as a middle-ground :any:`Completion` object between the
301 This acts as a middle-ground :any:`Completion` object between the
302 :any:`jedi.api.classes.Completion` object and the Prompt Toolkit completion
302 :any:`jedi.api.classes.Completion` object and the Prompt Toolkit completion
303 object. While Jedi needs a lot of information about the evaluator and how the
303 object. While Jedi needs a lot of information about the evaluator and how the
304 code should be run/inspected, Prompt Toolkit (and other frontends) mostly
304 code should be run/inspected, Prompt Toolkit (and other frontends) mostly
305 needs user-facing information.
305 needs user-facing information.
306
306
307 - Which range should be replaced by what.
307 - Which range should be replaced by what.
308 - Some metadata (like completion type), or meta information to display to
308 - Some metadata (like completion type), or meta information to display to
309 the user.
309 the user.
310
310
311 For debugging purposes we can also store the origin of the completion (``jedi``,
311 For debugging purposes we can also store the origin of the completion (``jedi``,
312 ``IPython.python_matches``, ``IPython.magics_matches``...).
312 ``IPython.python_matches``, ``IPython.magics_matches``...).
313 """
313 """
314
314
315 def __init__(self, start: int, end: int, text: str, *, type: str=None, _origin=''):
315 def __init__(self, start: int, end: int, text: str, *, type: str=None, _origin=''):
316 warnings.warn("``Completion`` is a provisional API (as of IPython 6.0). "
316 warnings.warn("``Completion`` is a provisional API (as of IPython 6.0). "
317 "It may change without warnings. "
317 "It may change without warnings. "
318 "Use in corresponding context manager.",
318 "Use in corresponding context manager.",
319 category=ProvisionalCompleterWarning, stacklevel=2)
319 category=ProvisionalCompleterWarning, stacklevel=2)
320
320
321 self.start = start
321 self.start = start
322 self.end = end
322 self.end = end
323 self.text = text
323 self.text = text
324 self.type = type
324 self.type = type
325 self._origin = _origin
325 self._origin = _origin
326
326
327 def __repr__(self):
327 def __repr__(self):
328 return '<Completion start=%s end=%s text=%r type=%r>' % (self.start, self.end, self.text, self.type or '?')
328 return '<Completion start=%s end=%s text=%r type=%r>' % (self.start, self.end, self.text, self.type or '?')
329
329
330 def __eq__(self, other)->bool:
330 def __eq__(self, other)->bool:
331 """
331 """
332 Equality and hash do not hash the type (as some completers may not be
332 Equality and hash do not hash the type (as some completers may not be
333 able to infer the type), but are used to (partially) de-duplicate
333 able to infer the type), but are used to (partially) de-duplicate
334 completions.
334 completions.
335
335
336 Completely de-duplicating completions is a bit trickier than just
336 Completely de-duplicating completions is a bit trickier than just
337 comparing, as it depends on the surrounding text, which Completions are not
337 comparing, as it depends on the surrounding text, which Completions are not
338 aware of.
338 aware of.
339 """
339 """
340 return self.start == other.start and \
340 return self.start == other.start and \
341 self.end == other.end and \
341 self.end == other.end and \
342 self.text == other.text
342 self.text == other.text
343
343
344 def __hash__(self):
344 def __hash__(self):
345 return hash((self.start, self.end, self.text))
345 return hash((self.start, self.end, self.text))
346
346
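A sketch of the equality and hashing semantics described in ``__eq__``: the ``type`` does not participate, so two completions differing only in type collapse to one (illustrative values):

    >>> with provisionalcompleter():
    ...     a = Completion(0, 4, 'math', type='module')
    ...     b = Completion(0, 4, 'math')
    ...     print(a == b, len({a, b}))
    True 1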
347
347 _IC = Iterator[Completion]
348 _IC = Iterator[Completion]
348
349
349 def rectify_completions(text:str, completion:_IC, *, _debug=False)->_IC:
350
351 def _deduplicate_completions(text: str, completions: _IC)-> _IC:
350 """
352 """
351 Rectify a set of completion to all have the same ``start`` and ``end``
353 Deduplicate a set of completions.
354
355 .. warning:: Unstable
356
357 This function is unstable, API may change without warning.
358
359 Parameters
360 ----------
361 text: str
362 text that should be completed.
363 completions: Iterator[Completion]
364 iterator over the completions to deduplicate
365
366
367 Completions coming from multiple sources may be different, but end up having
368 the same effect when applied to ``text``. If this is the case, this will
369 consider completions as equal and only emit the first encountered.
370
371 Not folded into `completions()` yet, for debugging purposes and to detect when
372 the IPython completer returns things that Jedi does not; it should be folded in
373 at some point.
374 """
375 completions = list(completions)
376 if not completions:
377 return
378
379 new_start = min(c.start for c in completions)
380 new_end = max(c.end for c in completions)
381
382 seen = set()
383 for c in completions:
384 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
385 if new_text not in seen:
386 yield c
387 seen.add(new_text)
388
389
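A sketch of what the helper collapses: two completions that spell the replacement differently but produce the same final text for ``text`` are reported once (illustrative values):

    >>> with provisionalcompleter():
    ...     comps = [Completion(0, 3, 'print'),   # replaces all of 'pri'
    ...              Completion(3, 3, 'nt')]      # only appends after it
    ...     list(_deduplicate_completions('pri', comps))
    [<Completion start=0 end=3 text='print' type='?'>]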
390 def rectify_completions(text: str, completions: _IC, *, _debug=False)->_IC:
391 """
392 Rectify a set of completions to all have the same ``start`` and ``end``
352
393
353 .. warning:: Unstable
394 .. warning:: Unstable
354
395
355 This function is unstable, API may change without warning.
396 This function is unstable, API may change without warning.
356 It will also raise unless use in proper context manager.
397 It will also raise unless use in proper context manager.
357
398
358 Parameters
399 Parameters
359 ----------
400 ----------
360 text: str
401 text: str
361 text that should be completed.
402 text that should be completed.
362 completion: Iterator[Completion]
403 completions: Iterator[Completion]
363 iterator over the completions to rectify
404 iterator over the completions to rectify
364
405
365
406
366 :any:`jedi.api.classes.Completion` s returned by Jedi may not have the same start and end, though
407 :any:`jedi.api.classes.Completion` s returned by Jedi may not have the same start and end, though
367 the Jupyter Protocol requires them to behave like so. This will readjust
408 the Jupyter Protocol requires them to behave like so. This will readjust
368 the completion to have the same ``start`` and ``end`` by padding both
409 the completion to have the same ``start`` and ``end`` by padding both
369 extremities with surrounding text.
410 extremities with surrounding text.
370
411
371 During stabilisation this should support a ``_debug`` option to log which
412 During stabilisation this should support a ``_debug`` option to log which
372 completions are returned by the IPython completer and not found in Jedi, in
413 completions are returned by the IPython completer and not found in Jedi, in
373 order to make upstream bug reports.
414 order to make upstream bug reports.
374 """
415 """
375 warnings.warn("`rectify_completions` is a provisional API (as of IPython 6.0). "
416 warnings.warn("`rectify_completions` is a provisional API (as of IPython 6.0). "
376 "It may change without warnings. "
417 "It may change without warnings. "
377 "Use in corresponding context manager.",
418 "Use in corresponding context manager.",
378 category=ProvisionalCompleterWarning, stacklevel=2)
419 category=ProvisionalCompleterWarning, stacklevel=2)
379
420
380 completions = list(completion)
421 completions = list(completions)
381 if not completions:
422 if not completions:
382 return
423 return
383 starts = (c.start for c in completions)
424 starts = (c.start for c in completions)
384 ends = (c.end for c in completions)
425 ends = (c.end for c in completions)
385
426
386 new_start = min(starts)
427 new_start = min(starts)
387 new_end = max(ends)
428 new_end = max(ends)
388
429
389 seen_jedi = set()
430 seen_jedi = set()
390 seen_python_matches = set()
431 seen_python_matches = set()
391 for c in completions:
432 for c in completions:
392 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
433 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
393 if c._origin == 'jedi':
434 if c._origin == 'jedi':
394 seen_jedi.add(new_text)
435 seen_jedi.add(new_text)
395 elif c._origin == 'IPCompleter.python_matches':
436 elif c._origin == 'IPCompleter.python_matches':
396 seen_python_matches.add(new_text)
437 seen_python_matches.add(new_text)
397 yield Completion(new_start, new_end, new_text, type=c.type, _origin=c._origin)
438 yield Completion(new_start, new_end, new_text, type=c.type, _origin=c._origin)
398 diff = seen_python_matches.difference(seen_jedi)
439 diff = seen_python_matches.difference(seen_jedi)
399 if diff and _debug:
440 if diff and _debug:
400 print('IPython.python matches have extras:', diff)
441 print('IPython.python matches have extras:', diff)
401
442
402
443
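A sketch of the padding behaviour: completions with different ranges are rewritten so they all share the widest common ``start``/``end`` (illustrative values):

    >>> with provisionalcompleter():
    ...     comps = [Completion(0, 3, 'funny'),    # replaces all of 'fun'
    ...              Completion(3, 3, 'ction')]    # only appends
    ...     list(rectify_completions('fun', comps))
    [<Completion start=0 end=3 text='funny' type='?'>, <Completion start=0 end=3 text='function' type='?'>]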
403 if sys.platform == 'win32':
444 if sys.platform == 'win32':
404 DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
445 DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
405 else:
446 else:
406 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
447 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
407
448
408 GREEDY_DELIMS = ' =\r\n'
449 GREEDY_DELIMS = ' =\r\n'
409
450
410
451
411 class CompletionSplitter(object):
452 class CompletionSplitter(object):
412 """An object to split an input line in a manner similar to readline.
453 """An object to split an input line in a manner similar to readline.
413
454
414 By having our own implementation, we can expose readline-like completion in
455 By having our own implementation, we can expose readline-like completion in
415 a uniform manner to all frontends. This object only needs to be given the
456 a uniform manner to all frontends. This object only needs to be given the
416 line of text to be split and the cursor position on said line, and it
457 line of text to be split and the cursor position on said line, and it
417 returns the 'word' to be completed on at the cursor after splitting the
458 returns the 'word' to be completed on at the cursor after splitting the
418 entire line.
459 entire line.
419
460
420 What characters are used as splitting delimiters can be controlled by
461 What characters are used as splitting delimiters can be controlled by
421 setting the ``delims`` attribute (this is a property that internally
462 setting the ``delims`` attribute (this is a property that internally
422 automatically builds the necessary regular expression)"""
463 automatically builds the necessary regular expression)"""
423
464
424 # Private interface
465 # Private interface
425
466
426 # A string of delimiter characters. The default value makes sense for
467 # A string of delimiter characters. The default value makes sense for
427 # IPython's most typical usage patterns.
468 # IPython's most typical usage patterns.
428 _delims = DELIMS
469 _delims = DELIMS
429
470
430 # The expression (a normal string) to be compiled into a regular expression
471 # The expression (a normal string) to be compiled into a regular expression
431 # for actual splitting. We store it as an attribute mostly for ease of
472 # for actual splitting. We store it as an attribute mostly for ease of
432 # debugging, since this type of code can be so tricky to debug.
473 # debugging, since this type of code can be so tricky to debug.
433 _delim_expr = None
474 _delim_expr = None
434
475
435 # The regular expression that does the actual splitting
476 # The regular expression that does the actual splitting
436 _delim_re = None
477 _delim_re = None
437
478
438 def __init__(self, delims=None):
479 def __init__(self, delims=None):
439 delims = CompletionSplitter._delims if delims is None else delims
480 delims = CompletionSplitter._delims if delims is None else delims
440 self.delims = delims
481 self.delims = delims
441
482
442 @property
483 @property
443 def delims(self):
484 def delims(self):
444 """Return the string of delimiter characters."""
485 """Return the string of delimiter characters."""
445 return self._delims
486 return self._delims
446
487
447 @delims.setter
488 @delims.setter
448 def delims(self, delims):
489 def delims(self, delims):
449 """Set the delimiters for line splitting."""
490 """Set the delimiters for line splitting."""
450 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
491 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
451 self._delim_re = re.compile(expr)
492 self._delim_re = re.compile(expr)
452 self._delims = delims
493 self._delims = delims
453 self._delim_expr = expr
494 self._delim_expr = expr
454
495
455 def split_line(self, line, cursor_pos=None):
496 def split_line(self, line, cursor_pos=None):
456 """Split a line of text with a cursor at the given position.
497 """Split a line of text with a cursor at the given position.
457 """
498 """
458 l = line if cursor_pos is None else line[:cursor_pos]
499 l = line if cursor_pos is None else line[:cursor_pos]
459 return self._delim_re.split(l)[-1]
500 return self._delim_re.split(l)[-1]
460
501
461
502
462
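Roughly how the splitter isolates the token under the cursor (the input line is illustrative):

    >>> sp = CompletionSplitter()
    >>> sp.split_line('a = max(foo.ba')
    'foo.ba'
    >>> sp.split_line('a = max(foo.ba', cursor_pos=8)
    ''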
503
463 class Completer(Configurable):
504 class Completer(Configurable):
464
505
465 greedy = Bool(False,
506 greedy = Bool(False,
466 help="""Activate greedy completion
507 help="""Activate greedy completion
467 PENDING DEPRECATION. This is now mostly taken care of with Jedi.
508 PENDING DEPRECATION. This is now mostly taken care of with Jedi.
468
509
469 This will enable completion on elements of lists, results of function calls, etc.,
510 This will enable completion on elements of lists, results of function calls, etc.,
470 but can be unsafe because the code is actually evaluated on TAB.
511 but can be unsafe because the code is actually evaluated on TAB.
471 """
512 """
472 ).tag(config=True)
513 ).tag(config=True)
473
514
474 use_jedi = Bool(default_value=JEDI_INSTALLED,
515 use_jedi = Bool(default_value=JEDI_INSTALLED,
475 help="Experimental: Use Jedi to generate autocompletions. "
516 help="Experimental: Use Jedi to generate autocompletions. "
476 "Default to True if jedi is installed").tag(config=True)
517 "Default to True if jedi is installed").tag(config=True)
477
518
478 jedi_compute_type_timeout = Int(default_value=400,
519 jedi_compute_type_timeout = Int(default_value=400,
479 help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
520 help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
480 Set to 0 to stop computing types. A non-zero value lower than 100ms may hurt
521 Set to 0 to stop computing types. A non-zero value lower than 100ms may hurt
481 performance by preventing jedi from building its cache.
522 performance by preventing jedi from building its cache.
482 """).tag(config=True)
523 """).tag(config=True)
483
524
484 debug = Bool(default_value=False,
525 debug = Bool(default_value=False,
485 help='Enable debug for the Completer. Mostly print extra '
526 help='Enable debug for the Completer. Mostly print extra '
486 'information for experimental jedi integration.')\
527 'information for experimental jedi integration.')\
487 .tag(config=True)
528 .tag(config=True)
488
529
489
530
490 def __init__(self, namespace=None, global_namespace=None, **kwargs):
531 def __init__(self, namespace=None, global_namespace=None, **kwargs):
491 """Create a new completer for the command line.
532 """Create a new completer for the command line.
492
533
493 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
534 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
494
535
495 If unspecified, the default namespace where completions are performed
536 If unspecified, the default namespace where completions are performed
496 is __main__ (technically, __main__.__dict__). Namespaces should be
537 is __main__ (technically, __main__.__dict__). Namespaces should be
497 given as dictionaries.
538 given as dictionaries.
498
539
499 An optional second namespace can be given. This allows the completer
540 An optional second namespace can be given. This allows the completer
500 to handle cases where both the local and global scopes need to be
541 to handle cases where both the local and global scopes need to be
501 distinguished.
542 distinguished.
502 """
543 """
503
544
504 # Don't bind to namespace quite yet, but flag whether the user wants a
545 # Don't bind to namespace quite yet, but flag whether the user wants a
505 # specific namespace or to use __main__.__dict__. This will allow us
546 # specific namespace or to use __main__.__dict__. This will allow us
506 # to bind to __main__.__dict__ at completion time, not now.
547 # to bind to __main__.__dict__ at completion time, not now.
507 if namespace is None:
548 if namespace is None:
508 self.use_main_ns = True
549 self.use_main_ns = True
509 else:
550 else:
510 self.use_main_ns = False
551 self.use_main_ns = False
511 self.namespace = namespace
552 self.namespace = namespace
512
553
513 # The global namespace, if given, can be bound directly
554 # The global namespace, if given, can be bound directly
514 if global_namespace is None:
555 if global_namespace is None:
515 self.global_namespace = {}
556 self.global_namespace = {}
516 else:
557 else:
517 self.global_namespace = global_namespace
558 self.global_namespace = global_namespace
518
559
519 super(Completer, self).__init__(**kwargs)
560 super(Completer, self).__init__(**kwargs)
520
561
521 def complete(self, text, state):
562 def complete(self, text, state):
522 """Return the next possible completion for 'text'.
563 """Return the next possible completion for 'text'.
523
564
524 This is called successively with state == 0, 1, 2, ... until it
565 This is called successively with state == 0, 1, 2, ... until it
525 returns None. The completion should begin with 'text'.
566 returns None. The completion should begin with 'text'.
526
567
527 """
568 """
528 if self.use_main_ns:
569 if self.use_main_ns:
529 self.namespace = __main__.__dict__
570 self.namespace = __main__.__dict__
530
571
531 if state == 0:
572 if state == 0:
532 if "." in text:
573 if "." in text:
533 self.matches = self.attr_matches(text)
574 self.matches = self.attr_matches(text)
534 else:
575 else:
535 self.matches = self.global_matches(text)
576 self.matches = self.global_matches(text)
536 try:
577 try:
537 return self.matches[state]
578 return self.matches[state]
538 except IndexError:
579 except IndexError:
539 return None
580 return None
540
581
541 def global_matches(self, text):
582 def global_matches(self, text):
542 """Compute matches when text is a simple name.
583 """Compute matches when text is a simple name.
543
584
544 Return a list of all keywords, built-in functions and names currently
585 Return a list of all keywords, built-in functions and names currently
545 defined in self.namespace or self.global_namespace that match.
586 defined in self.namespace or self.global_namespace that match.
546
587
547 """
588 """
548 matches = []
589 matches = []
549 match_append = matches.append
590 match_append = matches.append
550 n = len(text)
591 n = len(text)
551 for lst in [keyword.kwlist,
592 for lst in [keyword.kwlist,
552 builtin_mod.__dict__.keys(),
593 builtin_mod.__dict__.keys(),
553 self.namespace.keys(),
594 self.namespace.keys(),
554 self.global_namespace.keys()]:
595 self.global_namespace.keys()]:
555 for word in lst:
596 for word in lst:
556 if word[:n] == text and word != "__builtins__":
597 if word[:n] == text and word != "__builtins__":
557 match_append(word)
598 match_append(word)
558 return [cast_unicode_py2(m) for m in matches]
599 return [cast_unicode_py2(m) for m in matches]
559
600
560 def attr_matches(self, text):
601 def attr_matches(self, text):
561 """Compute matches when text contains a dot.
602 """Compute matches when text contains a dot.
562
603
563 Assuming the text is of the form NAME.NAME....[NAME], and is
604 Assuming the text is of the form NAME.NAME....[NAME], and is
564 evaluatable in self.namespace or self.global_namespace, it will be
605 evaluatable in self.namespace or self.global_namespace, it will be
565 evaluated and its attributes (as revealed by dir()) are used as
606 evaluated and its attributes (as revealed by dir()) are used as
566 possible completions. (For class instances, class members are
607 possible completions. (For class instances, class members are
567 also considered.)
608 also considered.)
568
609
569 WARNING: this can still invoke arbitrary C code, if an object
610 WARNING: this can still invoke arbitrary C code, if an object
570 with a __getattr__ hook is evaluated.
611 with a __getattr__ hook is evaluated.
571
612
572 """
613 """
573
614
574 # Another option, seems to work great. Catches things like ''.<tab>
615 # Another option, seems to work great. Catches things like ''.<tab>
575 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
616 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
576
617
577 if m:
618 if m:
578 expr, attr = m.group(1, 3)
619 expr, attr = m.group(1, 3)
579 elif self.greedy:
620 elif self.greedy:
580 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
621 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
581 if not m2:
622 if not m2:
582 return []
623 return []
583 expr, attr = m2.group(1,2)
624 expr, attr = m2.group(1,2)
584 else:
625 else:
585 return []
626 return []
586
627
587 try:
628 try:
588 obj = eval(expr, self.namespace)
629 obj = eval(expr, self.namespace)
589 except:
630 except:
590 try:
631 try:
591 obj = eval(expr, self.global_namespace)
632 obj = eval(expr, self.global_namespace)
592 except:
633 except:
593 return []
634 return []
594
635
595 if self.limit_to__all__ and hasattr(obj, '__all__'):
636 if self.limit_to__all__ and hasattr(obj, '__all__'):
596 words = get__all__entries(obj)
637 words = get__all__entries(obj)
597 else:
638 else:
598 words = dir2(obj)
639 words = dir2(obj)
599
640
600 try:
641 try:
601 words = generics.complete_object(obj, words)
642 words = generics.complete_object(obj, words)
602 except TryNext:
643 except TryNext:
603 pass
644 pass
604 except AssertionError:
645 except AssertionError:
605 raise
646 raise
606 except Exception:
647 except Exception:
607 # Silence errors from completion function
648 # Silence errors from completion function
608 #raise # dbg
649 #raise # dbg
609 pass
650 pass
610 # Build match list to return
651 # Build match list to return
611 n = len(attr)
652 n = len(attr)
612 return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr ]
653 return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr ]
613
654
614
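A sketch of the readline-style interface: ``complete`` is called with increasing ``state`` until it returns None. The namespace and names are illustrative, and the match order simply follows dict insertion order here:

    >>> ns = {'myvar': [1, 2, 3], 'mymax': max}
    >>> c = Completer(namespace=ns)
    >>> c.complete('my', 0), c.complete('my', 1), c.complete('my', 2)
    ('myvar', 'mymax', None)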
655
615 def get__all__entries(obj):
656 def get__all__entries(obj):
616 """returns the strings in the __all__ attribute"""
657 """returns the strings in the __all__ attribute"""
617 try:
658 try:
618 words = getattr(obj, '__all__')
659 words = getattr(obj, '__all__')
619 except:
660 except:
620 return []
661 return []
621
662
622 return [cast_unicode_py2(w) for w in words if isinstance(w, str)]
663 return [cast_unicode_py2(w) for w in words if isinstance(w, str)]
623
664
624
665
625 def match_dict_keys(keys: List[str], prefix: str, delims: str):
666 def match_dict_keys(keys: List[str], prefix: str, delims: str):
626 """Used by dict_key_matches, matching the prefix to a list of keys
667 """Used by dict_key_matches, matching the prefix to a list of keys
627
668
628 Parameters
669 Parameters
629 ==========
670 ==========
630 keys:
671 keys:
631 list of keys in dictionary currently being completed.
672 list of keys in dictionary currently being completed.
632 prefix:
673 prefix:
633 Part of the text already typed by the user. e.g. `mydict[b'fo`
674 Part of the text already typed by the user. e.g. `mydict[b'fo`
634 delims:
675 delims:
635 String of delimiters to consider when finding the current key.
676 String of delimiters to consider when finding the current key.
636
677
637 Returns
678 Returns
638 =======
679 =======
639
680
640 A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
681 A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
641 ``quote`` being the quote that needs to be used to close the current string,
682 ``quote`` being the quote that needs to be used to close the current string,
642 ``token_start`` the position where the replacement should start occurring,
683 ``token_start`` the position where the replacement should start occurring,
643 ``matched`` a list of replacement/completion matches.
684 ``matched`` a list of replacement/completion matches.
644
685
645 """
686 """
646 if not prefix:
687 if not prefix:
647 return None, 0, [repr(k) for k in keys
688 return None, 0, [repr(k) for k in keys
648 if isinstance(k, (str, bytes))]
689 if isinstance(k, (str, bytes))]
649 quote_match = re.search('["\']', prefix)
690 quote_match = re.search('["\']', prefix)
650 quote = quote_match.group()
691 quote = quote_match.group()
651 try:
692 try:
652 prefix_str = eval(prefix + quote, {})
693 prefix_str = eval(prefix + quote, {})
653 except Exception:
694 except Exception:
654 return None, 0, []
695 return None, 0, []
655
696
656 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
697 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
657 token_match = re.search(pattern, prefix, re.UNICODE)
698 token_match = re.search(pattern, prefix, re.UNICODE)
658 token_start = token_match.start()
699 token_start = token_match.start()
659 token_prefix = token_match.group()
700 token_prefix = token_match.group()
660
701
661 matched = []
702 matched = []
662 for key in keys:
703 for key in keys:
663 try:
704 try:
664 if not key.startswith(prefix_str):
705 if not key.startswith(prefix_str):
665 continue
706 continue
666 except (AttributeError, TypeError, UnicodeError):
707 except (AttributeError, TypeError, UnicodeError):
667 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
708 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
668 continue
709 continue
669
710
670 # reformat remainder of key to begin with prefix
711 # reformat remainder of key to begin with prefix
671 rem = key[len(prefix_str):]
712 rem = key[len(prefix_str):]
672 # force repr wrapped in '
713 # force repr wrapped in '
673 rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
714 rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
674 if rem_repr.startswith('u') and prefix[0] not in 'uU':
715 if rem_repr.startswith('u') and prefix[0] not in 'uU':
675 # Found key is unicode, but prefix is Py2 string.
716 # Found key is unicode, but prefix is Py2 string.
676 # Therefore attempt to interpret key as string.
717 # Therefore attempt to interpret key as string.
677 try:
718 try:
678 rem_repr = repr(rem.encode('ascii') + '"')
719 rem_repr = repr(rem.encode('ascii') + '"')
679 except UnicodeEncodeError:
720 except UnicodeEncodeError:
680 continue
721 continue
681
722
682 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
723 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
683 if quote == '"':
724 if quote == '"':
684 # The entered prefix is quoted with ",
725 # The entered prefix is quoted with ",
685 # but the match is quoted with '.
726 # but the match is quoted with '.
686 # A contained " hence needs escaping for comparison:
727 # A contained " hence needs escaping for comparison:
687 rem_repr = rem_repr.replace('"', '\\"')
728 rem_repr = rem_repr.replace('"', '\\"')
688
729
689 # then reinsert prefix from start of token
730 # then reinsert prefix from start of token
690 matched.append('%s%s' % (token_prefix, rem_repr))
731 matched.append('%s%s' % (token_prefix, rem_repr))
691 return quote, token_start, matched
732 return quote, token_start, matched
692
733
693
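Two illustrative calls, one with a partially typed single-quoted prefix and one with no prefix at all:

    >>> match_dict_keys(['foo', 'bar'], "'f", DELIMS)
    ("'", 1, ['foo'])
    >>> match_dict_keys(['foo', 'bar'], '', DELIMS)
    (None, 0, ["'foo'", "'bar'"])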
734
694 def cursor_to_position(text:str, line:int, column:int)->int:
735 def cursor_to_position(text:str, line:int, column:int)->int:
695 """
736 """
696
737
697 Convert the (line,column) position of the cursor in text to an offset in a
738 Convert the (line,column) position of the cursor in text to an offset in a
698 string.
739 string.
699
740
700 Parameters
741 Parameters
701 ----------
742 ----------
702
743
703 text : str
744 text : str
704 The text in which to calculate the cursor offset
745 The text in which to calculate the cursor offset
705 line : int
746 line : int
706 Line of the cursor; 0-indexed
747 Line of the cursor; 0-indexed
707 column : int
748 column : int
708 Column of the cursor 0-indexed
749 Column of the cursor 0-indexed
709
750
710 Returns
751 Returns
711 -------
752 -------
712 Position of the cursor in ``text``, 0-indexed.
753 Position of the cursor in ``text``, 0-indexed.
713
754
714 See Also
755 See Also
715 --------
756 --------
716 position_to_cursor: reciprocal of this function
757 position_to_cursor: reciprocal of this function
717
758
718 """
759 """
719 lines = text.split('\n')
760 lines = text.split('\n')
720 assert line <= len(lines), '{} <= {}'.format(str(line), str(len(lines)))
761 assert line <= len(lines), '{} <= {}'.format(str(line), str(len(lines)))
721
762
722 return sum(len(l) + 1 for l in lines[:line]) + column
763 return sum(len(l) + 1 for l in lines[:line]) + column
723
764
724 def position_to_cursor(text:str, offset:int)->(int, int):
765 def position_to_cursor(text:str, offset:int)->(int, int):
725 """
766 """
726 Convert the position of the cursor in text (0 indexed) to a line
767 Convert the position of the cursor in text (0 indexed) to a line
727 number(0-indexed) and a column number (0-indexed) pair
768 number(0-indexed) and a column number (0-indexed) pair
728
769
729 Position should be a valid position in ``text``.
770 Position should be a valid position in ``text``.
730
771
731 Parameters
772 Parameters
732 ----------
773 ----------
733
774
734 text : str
775 text : str
735 The text in which to calculate the cursor offset
776 The text in which to calculate the cursor offset
736 offset : int
777 offset : int
737 Position of the cursor in ``text``, 0-indexed.
778 Position of the cursor in ``text``, 0-indexed.
738
779
739 Returns
780 Returns
740 -------
781 -------
741 (line, column) : (int, int)
782 (line, column) : (int, int)
742 Line of the cursor; 0-indexed, column of the cursor 0-indexed
783 Line of the cursor; 0-indexed, column of the cursor 0-indexed
743
784
744
785
745 See Also
786 See Also
746 --------
787 --------
747 cursor_to_position : reciprocal of this function
788 cursor_to_position : reciprocal of this function
748
789
749
790
750 """
791 """
751
792
752 assert 0 < offset <= len(text) , "0 < %s <= %s" % (offset , len(text))
793 assert 0 < offset <= len(text) , "0 < %s <= %s" % (offset , len(text))
753
794
754 before = text[:offset]
795 before = text[:offset]
755 blines = before.split('\n') # ! splitlines trims trailing \n
796 blines = before.split('\n') # ! splitlines trims trailing \n
756 line = before.count('\n')
797 line = before.count('\n')
757 col = len(blines[-1])
798 col = len(blines[-1])
758 return line, col
799 return line, col
759
800
760
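The two helpers are inverses of each other, for example (illustrative text):

    >>> text = 'ab\ncd'
    >>> cursor_to_position(text, 1, 2)
    5
    >>> position_to_cursor(text, 5)
    (1, 2)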
801
761 def _safe_isinstance(obj, module, class_name):
802 def _safe_isinstance(obj, module, class_name):
762 """Checks if obj is an instance of module.class_name if loaded
803 """Checks if obj is an instance of module.class_name if loaded
763 """
804 """
764 return (module in sys.modules and
805 return (module in sys.modules and
765 isinstance(obj, getattr(import_module(module), class_name)))
806 isinstance(obj, getattr(import_module(module), class_name)))
766
807
767
808
768 def back_unicode_name_matches(text):
809 def back_unicode_name_matches(text):
769 u"""Match unicode characters back to unicode name
810 u"""Match unicode characters back to unicode name
770
811
771 This does ``β˜ƒ`` -> ``\\snowman``
812 This does ``β˜ƒ`` -> ``\\snowman``
772
813
773 Note that snowman is not a valid python3 combining character but will be expanded.
814 Note that snowman is not a valid python3 combining character but will be expanded.
774 Though it will not recombine back to the snowman character by the completion machinery.
815 Though it will not recombine back to the snowman character by the completion machinery.
775
816
776 This will not back-complete standard sequences like \\n, \\b ... either.
817 This will not back-complete standard sequences like \\n, \\b ... either.
777
818
778 Used on Python 3 only.
819 Used on Python 3 only.
779 """
820 """
780 if len(text)<2:
821 if len(text)<2:
781 return u'', ()
822 return u'', ()
782 maybe_slash = text[-2]
823 maybe_slash = text[-2]
783 if maybe_slash != '\\':
824 if maybe_slash != '\\':
784 return u'', ()
825 return u'', ()
785
826
786 char = text[-1]
827 char = text[-1]
787 # no expand on quote for completion in strings.
828 # no expand on quote for completion in strings.
788 # nor backcomplete standard ascii keys
829 # nor backcomplete standard ascii keys
789 if char in string.ascii_letters or char in ['"',"'"]:
830 if char in string.ascii_letters or char in ['"',"'"]:
790 return u'', ()
831 return u'', ()
791 try :
832 try :
792 unic = unicodedata.name(char)
833 unic = unicodedata.name(char)
793 return '\\'+char,['\\'+unic]
834 return '\\'+char,['\\'+unic]
794 except KeyError:
835 except KeyError:
795 pass
836 pass
796 return u'', ()
837 return u'', ()
797
838
798 def back_latex_name_matches(text:str):
839 def back_latex_name_matches(text:str):
799 """Match latex characters back to unicode name
840 """Match latex characters back to unicode name
800
841
801 This does ``\\β„΅`` -> ``\\aleph``
842 This does ``\\β„΅`` -> ``\\aleph``
802
843
803 Used on Python 3 only.
844 Used on Python 3 only.
804 """
845 """
805 if len(text)<2:
846 if len(text)<2:
806 return u'', ()
847 return u'', ()
807 maybe_slash = text[-2]
848 maybe_slash = text[-2]
808 if maybe_slash != '\\':
849 if maybe_slash != '\\':
809 return u'', ()
850 return u'', ()
810
851
811
852
812 char = text[-1]
853 char = text[-1]
813 # no expand on quote for completion in strings.
854 # no expand on quote for completion in strings.
814 # nor backcomplete standard ascii keys
855 # nor backcomplete standard ascii keys
815 if char in string.ascii_letters or char in ['"',"'"]:
856 if char in string.ascii_letters or char in ['"',"'"]:
816 return u'', ()
857 return u'', ()
817 try :
858 try :
818 latex = reverse_latex_symbol[char]
859 latex = reverse_latex_symbol[char]
819 # '\\' replace the \ as well
860 # '\\' replace the \ as well
820 return '\\'+char,[latex]
861 return '\\'+char,[latex]
821 except KeyError:
862 except KeyError:
822 pass
863 pass
823 return u'', ()
864 return u'', ()
824
865
825
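Sketches of the two back-completion helpers (the characters are illustrative; the latex mapping comes from ``reverse_latex_symbol``):

    >>> back_unicode_name_matches('\\β˜ƒ')
    ('\\β˜ƒ', ['\\SNOWMAN'])
    >>> back_latex_name_matches('\\β„΅')
    ('\\β„΅', ['\\aleph'])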
866
826 class IPCompleter(Completer):
867 class IPCompleter(Completer):
827 """Extension of the completer class with IPython-specific features"""
868 """Extension of the completer class with IPython-specific features"""
828
869
829 @observe('greedy')
870 @observe('greedy')
830 def _greedy_changed(self, change):
871 def _greedy_changed(self, change):
831 """update the splitter and readline delims when greedy is changed"""
872 """update the splitter and readline delims when greedy is changed"""
832 if change['new']:
873 if change['new']:
833 self.splitter.delims = GREEDY_DELIMS
874 self.splitter.delims = GREEDY_DELIMS
834 else:
875 else:
835 self.splitter.delims = DELIMS
876 self.splitter.delims = DELIMS
836
877
837 merge_completions = Bool(True,
878 merge_completions = Bool(True,
838 help="""Whether to merge completion results into a single list
879 help="""Whether to merge completion results into a single list
839
880
840 If False, only the completion results from the first non-empty
881 If False, only the completion results from the first non-empty
841 completer will be returned.
882 completer will be returned.
842 """
883 """
843 ).tag(config=True)
884 ).tag(config=True)
844 omit__names = Enum((0,1,2), default_value=2,
885 omit__names = Enum((0,1,2), default_value=2,
845 help="""Instruct the completer to omit private method names
886 help="""Instruct the completer to omit private method names
846
887
847 Specifically, when completing on ``object.<tab>``.
888 Specifically, when completing on ``object.<tab>``.
848
889
849 When 2 [default]: all names that start with '_' will be excluded.
890 When 2 [default]: all names that start with '_' will be excluded.
850
891
851 When 1: all 'magic' names (``__foo__``) will be excluded.
892 When 1: all 'magic' names (``__foo__``) will be excluded.
852
893
853 When 0: nothing will be excluded.
894 When 0: nothing will be excluded.
854 """
895 """
855 ).tag(config=True)
896 ).tag(config=True)
856 limit_to__all__ = Bool(False,
897 limit_to__all__ = Bool(False,
857 help="""
898 help="""
858 DEPRECATED as of version 5.0.
899 DEPRECATED as of version 5.0.
859
900
860 Instruct the completer to use __all__ for the completion
901 Instruct the completer to use __all__ for the completion
861
902
862 Specifically, when completing on ``object.<tab>``.
903 Specifically, when completing on ``object.<tab>``.
863
904
864 When True: only those names in obj.__all__ will be included.
905 When True: only those names in obj.__all__ will be included.
865
906
866 When False [default]: the __all__ attribute is ignored
907 When False [default]: the __all__ attribute is ignored
867 """,
908 """,
868 ).tag(config=True)
909 ).tag(config=True)
869
910
870 @observe('limit_to__all__')
911 @observe('limit_to__all__')
871 def _limit_to_all_changed(self, change):
912 def _limit_to_all_changed(self, change):
872 warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
913 warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
873 'value has been deprecated since IPython 5.0, will be made to have '
914 'value has been deprecated since IPython 5.0, will be made to have '
874 'no effect and then removed in a future version of IPython.',
915 'no effect and then removed in a future version of IPython.',
875 UserWarning)
916 UserWarning)
876
917
877 def __init__(self, shell=None, namespace=None, global_namespace=None,
918 def __init__(self, shell=None, namespace=None, global_namespace=None,
878 use_readline=_deprecation_readline_sentinel, config=None, **kwargs):
919 use_readline=_deprecation_readline_sentinel, config=None, **kwargs):
879 """IPCompleter() -> completer
920 """IPCompleter() -> completer
880
921
881 Return a completer object.
922 Return a completer object.
882
923
883 Parameters
924 Parameters
884 ----------
925 ----------
885
926
886 shell
927 shell
887 a pointer to the ipython shell itself. This is needed
928 a pointer to the ipython shell itself. This is needed
888 because this completer knows about magic functions, and those can
929 because this completer knows about magic functions, and those can
889 only be accessed via the ipython instance.
930 only be accessed via the ipython instance.
890
931
891 namespace : dict, optional
932 namespace : dict, optional
892 an optional dict where completions are performed.
933 an optional dict where completions are performed.
893
934
894 global_namespace : dict, optional
935 global_namespace : dict, optional
895 secondary optional dict for completions, to
936 secondary optional dict for completions, to
896 handle cases (such as IPython embedded inside functions) where
937 handle cases (such as IPython embedded inside functions) where
897 both Python scopes are visible.
938 both Python scopes are visible.
898
939
899 use_readline : bool, optional
940 use_readline : bool, optional
900 DEPRECATED, ignored since IPython 6.0, will have no effect
941 DEPRECATED, ignored since IPython 6.0, will have no effect
901 """
942 """
902
943
903 self.magic_escape = ESC_MAGIC
944 self.magic_escape = ESC_MAGIC
904 self.splitter = CompletionSplitter()
945 self.splitter = CompletionSplitter()
905
946
906 if use_readline is not _deprecation_readline_sentinel:
947 if use_readline is not _deprecation_readline_sentinel:
907 warnings.warn('The `use_readline` parameter is deprecated and ignored since IPython 6.0.',
948 warnings.warn('The `use_readline` parameter is deprecated and ignored since IPython 6.0.',
908 DeprecationWarning, stacklevel=2)
949 DeprecationWarning, stacklevel=2)
909
950
910 # _greedy_changed() depends on splitter and readline being defined:
951 # _greedy_changed() depends on splitter and readline being defined:
911 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
952 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
912 config=config, **kwargs)
953 config=config, **kwargs)
913
954
914 # List where completion matches will be stored
955 # List where completion matches will be stored
915 self.matches = []
956 self.matches = []
916 self.shell = shell
957 self.shell = shell
917 # Regexp to split filenames with spaces in them
958 # Regexp to split filenames with spaces in them
918 self.space_name_re = re.compile(r'([^\\] )')
959 self.space_name_re = re.compile(r'([^\\] )')
919 # Hold a local ref. to glob.glob for speed
960 # Hold a local ref. to glob.glob for speed
920 self.glob = glob.glob
961 self.glob = glob.glob
921
962
922 # Determine if we are running on 'dumb' terminals, like (X)Emacs
963 # Determine if we are running on 'dumb' terminals, like (X)Emacs
923 # buffers, to avoid completion problems.
964 # buffers, to avoid completion problems.
924 term = os.environ.get('TERM','xterm')
965 term = os.environ.get('TERM','xterm')
925 self.dumb_terminal = term in ['dumb','emacs']
966 self.dumb_terminal = term in ['dumb','emacs']
926
967
927 # Special handling of backslashes needed in win32 platforms
968 # Special handling of backslashes needed in win32 platforms
928 if sys.platform == "win32":
969 if sys.platform == "win32":
929 self.clean_glob = self._clean_glob_win32
970 self.clean_glob = self._clean_glob_win32
930 else:
971 else:
931 self.clean_glob = self._clean_glob
972 self.clean_glob = self._clean_glob
932
973
933 #regexp to parse docstring for function signature
974 #regexp to parse docstring for function signature
934 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
975 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
935 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
976 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
936 #use this if positional argument name is also needed
977 #use this if positional argument name is also needed
937 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
978 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
938
979
939 # All active matcher routines for completion
980 # All active matcher routines for completion
940 self.matchers = [
981 self.matchers = [
941 self.python_matches,
982 self.python_matches,
942 self.file_matches,
983 self.file_matches,
943 self.magic_matches,
984 self.magic_matches,
944 self.python_func_kw_matches,
985 self.python_func_kw_matches,
945 self.dict_key_matches,
986 self.dict_key_matches,
946 ]
987 ]
947
988
948 # This is set externally by InteractiveShell
989 # This is set externally by InteractiveShell
949 self.custom_completers = None
990 self.custom_completers = None
950
991
951 def all_completions(self, text):
992 def all_completions(self, text):
952 """
993 """
953 Wrapper around the complete method for the benefit of emacs.
994 Wrapper around the complete method for the benefit of emacs.
954 """
995 """
955 return self.complete(text)[1]
996 return self.complete(text)[1]
956
997
957 def _clean_glob(self, text):
998 def _clean_glob(self, text):
958 return self.glob("%s*" % text)
999 return self.glob("%s*" % text)
959
1000
960 def _clean_glob_win32(self,text):
1001 def _clean_glob_win32(self,text):
961 return [f.replace("\\","/")
1002 return [f.replace("\\","/")
962 for f in self.glob("%s*" % text)]
1003 for f in self.glob("%s*" % text)]
963
1004
964 def file_matches(self, text):
1005 def file_matches(self, text):
965 """Match filenames, expanding ~USER type strings.
1006 """Match filenames, expanding ~USER type strings.
966
1007
967 Most of the seemingly convoluted logic in this completer is an
1008 Most of the seemingly convoluted logic in this completer is an
968 attempt to handle filenames with spaces in them. And yet it's not
1009 attempt to handle filenames with spaces in them. And yet it's not
969 quite perfect, because Python's readline doesn't expose all of the
1010 quite perfect, because Python's readline doesn't expose all of the
970 GNU readline details needed for this to be done correctly.
1011 GNU readline details needed for this to be done correctly.
971
1012
972 For a filename with a space in it, the printed completions will be
1013 For a filename with a space in it, the printed completions will be
973 only the parts after what's already been typed (instead of the
1014 only the parts after what's already been typed (instead of the
974 full completions, as is normally done). I don't think with the
1015 full completions, as is normally done). I don't think with the
975 current (as of Python 2.3) Python readline it's possible to do
1016 current (as of Python 2.3) Python readline it's possible to do
976 better."""
1017 better."""
977
1018
978 # chars that require escaping with backslash - i.e. chars
1019 # chars that require escaping with backslash - i.e. chars
979 # that readline treats incorrectly as delimiters, but we
1020 # that readline treats incorrectly as delimiters, but we
980 # don't want to treat as delimiters in filename matching
1021 # don't want to treat as delimiters in filename matching
981 # when escaped with backslash
1022 # when escaped with backslash
982 if text.startswith('!'):
1023 if text.startswith('!'):
983 text = text[1:]
1024 text = text[1:]
984 text_prefix = u'!'
1025 text_prefix = u'!'
985 else:
1026 else:
986 text_prefix = u''
1027 text_prefix = u''
987
1028
988 text_until_cursor = self.text_until_cursor
1029 text_until_cursor = self.text_until_cursor
989 # track strings with open quotes
1030 # track strings with open quotes
990 open_quotes = has_open_quotes(text_until_cursor)
1031 open_quotes = has_open_quotes(text_until_cursor)
991
1032
992 if '(' in text_until_cursor or '[' in text_until_cursor:
1033 if '(' in text_until_cursor or '[' in text_until_cursor:
993 lsplit = text
1034 lsplit = text
994 else:
1035 else:
995 try:
1036 try:
996 # arg_split ~ shlex.split, but with unicode bugs fixed by us
1037 # arg_split ~ shlex.split, but with unicode bugs fixed by us
997 lsplit = arg_split(text_until_cursor)[-1]
1038 lsplit = arg_split(text_until_cursor)[-1]
998 except ValueError:
1039 except ValueError:
999 # typically an unmatched ", or backslash without escaped char.
1040 # typically an unmatched ", or backslash without escaped char.
1000 if open_quotes:
1041 if open_quotes:
1001 lsplit = text_until_cursor.split(open_quotes)[-1]
1042 lsplit = text_until_cursor.split(open_quotes)[-1]
1002 else:
1043 else:
1003 return []
1044 return []
1004 except IndexError:
1045 except IndexError:
1005 # tab pressed on empty line
1046 # tab pressed on empty line
1006 lsplit = ""
1047 lsplit = ""
1007
1048
1008 if not open_quotes and lsplit != protect_filename(lsplit):
1049 if not open_quotes and lsplit != protect_filename(lsplit):
1009 # if protectables are found, do matching on the whole escaped name
1050 # if protectables are found, do matching on the whole escaped name
1010 has_protectables = True
1051 has_protectables = True
1011 text0,text = text,lsplit
1052 text0,text = text,lsplit
1012 else:
1053 else:
1013 has_protectables = False
1054 has_protectables = False
1014 text = os.path.expanduser(text)
1055 text = os.path.expanduser(text)
1015
1056
1016 if text == "":
1057 if text == "":
1017 return [text_prefix + cast_unicode_py2(protect_filename(f)) for f in self.glob("*")]
1058 return [text_prefix + cast_unicode_py2(protect_filename(f)) for f in self.glob("*")]
1018
1059
1019 # Compute the matches from the filesystem
1060 # Compute the matches from the filesystem
1020 if sys.platform == 'win32':
1061 if sys.platform == 'win32':
1021 m0 = self.clean_glob(text)
1062 m0 = self.clean_glob(text)
1022 else:
1063 else:
1023 m0 = self.clean_glob(text.replace('\\', ''))
1064 m0 = self.clean_glob(text.replace('\\', ''))
1024
1065
1025 if has_protectables:
1066 if has_protectables:
1026 # If we had protectables, we need to revert our changes to the
1067 # If we had protectables, we need to revert our changes to the
1027 # beginning of filename so that we don't double-write the part
1068 # beginning of filename so that we don't double-write the part
1028 # of the filename we have so far
1069 # of the filename we have so far
1029 len_lsplit = len(lsplit)
1070 len_lsplit = len(lsplit)
1030 matches = [text_prefix + text0 +
1071 matches = [text_prefix + text0 +
1031 protect_filename(f[len_lsplit:]) for f in m0]
1072 protect_filename(f[len_lsplit:]) for f in m0]
1032 else:
1073 else:
1033 if open_quotes:
1074 if open_quotes:
1034 # if we have a string with an open quote, we don't need to
1075 # if we have a string with an open quote, we don't need to
1035 # protect the names at all (and we _shouldn't_, as it
1076 # protect the names at all (and we _shouldn't_, as it
1036 # would cause bugs when the filesystem call is made).
1077 # would cause bugs when the filesystem call is made).
1037 matches = m0
1078 matches = m0
1038 else:
1079 else:
1039 matches = [text_prefix +
1080 matches = [text_prefix +
1040 protect_filename(f) for f in m0]
1081 protect_filename(f) for f in m0]
1041
1082
1042 # Mark directories in input list by appending '/' to their names.
1083 # Mark directories in input list by appending '/' to their names.
1043 return [cast_unicode_py2(x+'/') if os.path.isdir(x) else x for x in matches]
1084 return [cast_unicode_py2(x+'/') if os.path.isdir(x) else x for x in matches]
1044
1085
1045 def magic_matches(self, text):
1086 def magic_matches(self, text):
1046 """Match magics"""
1087 """Match magics"""
1047 # Get all shell magics now rather than statically, so magics loaded at
1088 # Get all shell magics now rather than statically, so magics loaded at
1048 # runtime show up too.
1089 # runtime show up too.
1049 lsm = self.shell.magics_manager.lsmagic()
1090 lsm = self.shell.magics_manager.lsmagic()
1050 line_magics = lsm['line']
1091 line_magics = lsm['line']
1051 cell_magics = lsm['cell']
1092 cell_magics = lsm['cell']
1052 pre = self.magic_escape
1093 pre = self.magic_escape
1053 pre2 = pre+pre
1094 pre2 = pre+pre
1054
1095
1055 # Completion logic:
1096 # Completion logic:
1056 # - user gives %%: only do cell magics
1097 # - user gives %%: only do cell magics
1057 # - user gives %: do both line and cell magics
1098 # - user gives %: do both line and cell magics
1058 # - no prefix: do both
1099 # - no prefix: do both
1059 # In other words, line magics are skipped if the user gives %% explicitly
1100 # In other words, line magics are skipped if the user gives %% explicitly
1060 bare_text = text.lstrip(pre)
1101 bare_text = text.lstrip(pre)
1061 comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
1102 comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
1062 if not text.startswith(pre2):
1103 if not text.startswith(pre2):
1063 comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
1104 comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
1064 return [cast_unicode_py2(c) for c in comp]
1105 return [cast_unicode_py2(c) for c in comp]
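# A hedged sketch of the completion logic above; exact results depend on which
# magics are registered in the running shell, so the values are indicative only:
#
#   >>> ip = get_ipython()                          # assumes a live IPython shell
#   >>> sorted(ip.Completer.magic_matches('%tim'))  # doctest: +SKIP
#   ['%%time', '%%timeit', '%time', '%timeit']
#   >>> sorted(ip.Completer.magic_matches('%%tim')) # doctest: +SKIP
#   ['%%time', '%%timeit']                          # '%%' prefix skips line magics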
1065
1106
1066 def _jedi_matches(self, cursor_column:int, cursor_line:int, text:str):
1107 def _jedi_matches(self, cursor_column:int, cursor_line:int, text:str):
1067 """
1108 """
1068
1109
1069 Return a list of :any:`jedi.api.Completions` objects from a ``text`` and
1110 Return a list of :any:`jedi.api.Completions` objects from a ``text`` and
1070 cursor position.
1111 cursor position.
1071
1112
1072 Parameters
1113 Parameters
1073 ----------
1114 ----------
1074 cursor_column : int
1115 cursor_column : int
1075 column position of the cursor in ``text``, 0-indexed.
1116 column position of the cursor in ``text``, 0-indexed.
1076 cursor_line : int
1117 cursor_line : int
1077 line position of the cursor in ``text``, 0-indexed
1118 line position of the cursor in ``text``, 0-indexed
1078 text : str
1119 text : str
1079 text to complete
1120 text to complete
1080
1121
1081 Debugging
1122 Debugging
1082 ---------
1123 ---------
1083
1124
1084 If ``IPCompleter.debug`` is ``True``, this may return a :any:`_FakeJediCompletion`
1125 If ``IPCompleter.debug`` is ``True``, this may return a :any:`_FakeJediCompletion`
1085 object containing a string with the Jedi debug information attached.
1126 object containing a string with the Jedi debug information attached.
1086 """
1127 """
1087 namespaces = [self.namespace]
1128 namespaces = [self.namespace]
1088 if self.global_namespace is not None:
1129 if self.global_namespace is not None:
1089 namespaces.append(self.global_namespace)
1130 namespaces.append(self.global_namespace)
1090
1131
1091 # cursor_pos is an int, jedi wants line and column
1132 # cursor_pos is an int, jedi wants line and column
1092 offset = cursor_to_position(text, cursor_line, cursor_column)
1133 offset = cursor_to_position(text, cursor_line, cursor_column)
1093 if offset:
1134 if offset:
1094 pre = text[offset-1]
1135 pre = text[offset-1]
1095 completion_filter = lambda x:x
1136 completion_filter = lambda x:x
1096 if pre == '.':
1137 if pre == '.':
1097 if self.omit__names == 2:
1138 if self.omit__names == 2:
1098 completion_filter = lambda c:not c.name.startswith('_')
1139 completion_filter = lambda c:not c.name.startswith('_')
1099 elif self.omit__names == 1:
1140 elif self.omit__names == 1:
1100 completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
1141 completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
1101 elif self.omit__names == 0:
1142 elif self.omit__names == 0:
1102 completion_filter = lambda x:x
1143 completion_filter = lambda x:x
1103 else:
1144 else:
1104 raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
1145 raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
1105
1146
1106 interpreter = jedi.Interpreter(
1147 interpreter = jedi.Interpreter(
1107 text, namespaces, column=cursor_column, line=cursor_line + 1)
1148 text, namespaces, column=cursor_column, line=cursor_line + 1)
1108 try:
1149 try:
1109 return filter(completion_filter, interpreter.completions())
1150 return filter(completion_filter, interpreter.completions())
1110 except Exception as e:
1151 except Exception as e:
1111 if self.debug:
1152 if self.debug:
1112 return [_FakeJediCompletion('Oops, Jedi has crashed, please report a bug with the following:\n"""\n%s\n"""' % (e))]
1153 return [_FakeJediCompletion('Oops, Jedi has crashed, please report a bug with the following:\n"""\n%s\n"""' % (e))]
1113 else:
1154 else:
1114 return []
1155 return []
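# Hedged sketch of the coordinate convention: ``cursor_line``/``cursor_column``
# are 0-based here, while ``jedi.Interpreter`` expects a 1-based line (hence
# ``cursor_line + 1``). ``completer`` stands for an IPCompleter instance and
# jedi is assumed to be installed:
#
#   >>> src = "import collections\ncollections.Ord"
#   >>> comps = list(completer._jedi_matches(15, 1, src))  # doctest: +SKIP
#   >>> [c.name for c in comps]                            # doctest: +SKIP
#   ['OrderedDict']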
1115
1156
1116 def python_matches(self, text):
1157 def python_matches(self, text):
1117 """Match attributes or global python names"""
1158 """Match attributes or global python names"""
1118 if "." in text:
1159 if "." in text:
1119 try:
1160 try:
1120 matches = self.attr_matches(text)
1161 matches = self.attr_matches(text)
1121 if text.endswith('.') and self.omit__names:
1162 if text.endswith('.') and self.omit__names:
1122 if self.omit__names == 1:
1163 if self.omit__names == 1:
1123 # true if txt is _not_ a __ name, false otherwise:
1164 # true if txt is _not_ a __ name, false otherwise:
1124 no__name = (lambda txt:
1165 no__name = (lambda txt:
1125 re.match(r'.*\.__.*?__',txt) is None)
1166 re.match(r'.*\.__.*?__',txt) is None)
1126 else:
1167 else:
1127 # true if txt is _not_ a _ name, false otherwise:
1168 # true if txt is _not_ a _ name, false otherwise:
1128 no__name = (lambda txt:
1169 no__name = (lambda txt:
1129 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
1170 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
1130 matches = filter(no__name, matches)
1171 matches = filter(no__name, matches)
1131 except NameError:
1172 except NameError:
1132 # catches <undefined attributes>.<tab>
1173 # catches <undefined attributes>.<tab>
1133 matches = []
1174 matches = []
1134 else:
1175 else:
1135 matches = self.global_matches(text)
1176 matches = self.global_matches(text)
1136 return matches
1177 return matches
1137
1178
1138 def _default_arguments_from_docstring(self, doc):
1179 def _default_arguments_from_docstring(self, doc):
1139 """Parse the first line of docstring for call signature.
1180 """Parse the first line of docstring for call signature.
1140
1181
1141 Docstring should be of the form 'min(iterable[, key=func])\n'.
1182 Docstring should be of the form 'min(iterable[, key=func])\n'.
1142 It can also parse cython docstring of the form
1183 It can also parse cython docstring of the form
1143 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
1184 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
1144 """
1185 """
1145 if doc is None:
1186 if doc is None:
1146 return []
1187 return []
1147
1188
1148 # care only about the first line
1189 # care only about the first line
1149 line = doc.lstrip().splitlines()[0]
1190 line = doc.lstrip().splitlines()[0]
1150
1191
1151 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1192 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1152 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
1193 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
1153 sig = self.docstring_sig_re.search(line)
1194 sig = self.docstring_sig_re.search(line)
1154 if sig is None:
1195 if sig is None:
1155 return []
1196 return []
1156 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
1197 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
1157 sig = sig.groups()[0].split(',')
1198 sig = sig.groups()[0].split(',')
1158 ret = []
1199 ret = []
1159 for s in sig:
1200 for s in sig:
1160 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1201 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1161 ret += self.docstring_kwd_re.findall(s)
1202 ret += self.docstring_kwd_re.findall(s)
1162 return ret
1203 return ret
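# Hedged doctests for the parsing above; the inputs are the two signature
# styles quoted in the docstring (``completer`` stands for an IPCompleter
# instance):
#
#   >>> completer._default_arguments_from_docstring(
#   ...     'min(iterable[, key=func])\n')                                       # doctest: +SKIP
#   ['key']
#   >>> completer._default_arguments_from_docstring(
#   ...     'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)')   # doctest: +SKIP
#   ['ncall', 'resume', 'nsplit']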
1163
1204
1164 def _default_arguments(self, obj):
1205 def _default_arguments(self, obj):
1165 """Return the list of default arguments of obj if it is callable,
1206 """Return the list of default arguments of obj if it is callable,
1166 or empty list otherwise."""
1207 or empty list otherwise."""
1167 call_obj = obj
1208 call_obj = obj
1168 ret = []
1209 ret = []
1169 if inspect.isbuiltin(obj):
1210 if inspect.isbuiltin(obj):
1170 pass
1211 pass
1171 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
1212 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
1172 if inspect.isclass(obj):
1213 if inspect.isclass(obj):
1173 # for cython embedsignature=True the constructor docstring
1214 # for cython embedsignature=True the constructor docstring
1174 #belongs to the object itself not __init__
1215 #belongs to the object itself not __init__
1175 ret += self._default_arguments_from_docstring(
1216 ret += self._default_arguments_from_docstring(
1176 getattr(obj, '__doc__', ''))
1217 getattr(obj, '__doc__', ''))
1177 # for classes, check for __init__,__new__
1218 # for classes, check for __init__,__new__
1178 call_obj = (getattr(obj, '__init__', None) or
1219 call_obj = (getattr(obj, '__init__', None) or
1179 getattr(obj, '__new__', None))
1220 getattr(obj, '__new__', None))
1180 # for all others, check if they are __call__able
1221 # for all others, check if they are __call__able
1181 elif hasattr(obj, '__call__'):
1222 elif hasattr(obj, '__call__'):
1182 call_obj = obj.__call__
1223 call_obj = obj.__call__
1183 ret += self._default_arguments_from_docstring(
1224 ret += self._default_arguments_from_docstring(
1184 getattr(call_obj, '__doc__', ''))
1225 getattr(call_obj, '__doc__', ''))
1185
1226
1186 _keeps = (inspect.Parameter.KEYWORD_ONLY,
1227 _keeps = (inspect.Parameter.KEYWORD_ONLY,
1187 inspect.Parameter.POSITIONAL_OR_KEYWORD)
1228 inspect.Parameter.POSITIONAL_OR_KEYWORD)
1188
1229
1189 try:
1230 try:
1190 sig = inspect.signature(call_obj)
1231 sig = inspect.signature(call_obj)
1191 ret.extend(k for k, v in sig.parameters.items() if
1232 ret.extend(k for k, v in sig.parameters.items() if
1192 v.kind in _keeps)
1233 v.kind in _keeps)
1193 except ValueError:
1234 except ValueError:
1194 pass
1235 pass
1195
1236
1196 return list(set(ret))
1237 return list(set(ret))
1197
1238
1198 def python_func_kw_matches(self,text):
1239 def python_func_kw_matches(self,text):
1199 """Match named parameters (kwargs) of the last open function"""
1240 """Match named parameters (kwargs) of the last open function"""
1200
1241
1201 if "." in text: # a parameter cannot be dotted
1242 if "." in text: # a parameter cannot be dotted
1202 return []
1243 return []
1203 try: regexp = self.__funcParamsRegex
1244 try: regexp = self.__funcParamsRegex
1204 except AttributeError:
1245 except AttributeError:
1205 regexp = self.__funcParamsRegex = re.compile(r'''
1246 regexp = self.__funcParamsRegex = re.compile(r'''
1206 '.*?(?<!\\)' | # single quoted strings or
1247 '.*?(?<!\\)' | # single quoted strings or
1207 ".*?(?<!\\)" | # double quoted strings or
1248 ".*?(?<!\\)" | # double quoted strings or
1208 \w+ | # identifier
1249 \w+ | # identifier
1209 \S # other characters
1250 \S # other characters
1210 ''', re.VERBOSE | re.DOTALL)
1251 ''', re.VERBOSE | re.DOTALL)
1211 # 1. find the nearest identifier that comes before an unclosed
1252 # 1. find the nearest identifier that comes before an unclosed
1212 # parenthesis before the cursor
1253 # parenthesis before the cursor
1213 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
1254 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
1214 tokens = regexp.findall(self.text_until_cursor)
1255 tokens = regexp.findall(self.text_until_cursor)
1215 iterTokens = reversed(tokens); openPar = 0
1256 iterTokens = reversed(tokens); openPar = 0
1216
1257
1217 for token in iterTokens:
1258 for token in iterTokens:
1218 if token == ')':
1259 if token == ')':
1219 openPar -= 1
1260 openPar -= 1
1220 elif token == '(':
1261 elif token == '(':
1221 openPar += 1
1262 openPar += 1
1222 if openPar > 0:
1263 if openPar > 0:
1223 # found the last unclosed parenthesis
1264 # found the last unclosed parenthesis
1224 break
1265 break
1225 else:
1266 else:
1226 return []
1267 return []
1227 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
1268 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
1228 ids = []
1269 ids = []
1229 isId = re.compile(r'\w+$').match
1270 isId = re.compile(r'\w+$').match
1230
1271
1231 while True:
1272 while True:
1232 try:
1273 try:
1233 ids.append(next(iterTokens))
1274 ids.append(next(iterTokens))
1234 if not isId(ids[-1]):
1275 if not isId(ids[-1]):
1235 ids.pop(); break
1276 ids.pop(); break
1236 if not next(iterTokens) == '.':
1277 if not next(iterTokens) == '.':
1237 break
1278 break
1238 except StopIteration:
1279 except StopIteration:
1239 break
1280 break
1240
1281
1241 # Find all named arguments already assigned to, so as to avoid suggesting
1282 # Find all named arguments already assigned to, so as to avoid suggesting
1242 # them again
1283 # them again
1243 usedNamedArgs = set()
1284 usedNamedArgs = set()
1244 par_level = -1
1285 par_level = -1
1245 for token, next_token in zip(tokens, tokens[1:]):
1286 for token, next_token in zip(tokens, tokens[1:]):
1246 if token == '(':
1287 if token == '(':
1247 par_level += 1
1288 par_level += 1
1248 elif token == ')':
1289 elif token == ')':
1249 par_level -= 1
1290 par_level -= 1
1250
1291
1251 if par_level != 0:
1292 if par_level != 0:
1252 continue
1293 continue
1253
1294
1254 if next_token != '=':
1295 if next_token != '=':
1255 continue
1296 continue
1256
1297
1257 usedNamedArgs.add(token)
1298 usedNamedArgs.add(token)
1258
1299
1259 # lookup the candidate callable matches either using global_matches
1300 # lookup the candidate callable matches either using global_matches
1260 # or attr_matches for dotted names
1301 # or attr_matches for dotted names
1261 if len(ids) == 1:
1302 if len(ids) == 1:
1262 callableMatches = self.global_matches(ids[0])
1303 callableMatches = self.global_matches(ids[0])
1263 else:
1304 else:
1264 callableMatches = self.attr_matches('.'.join(ids[::-1]))
1305 callableMatches = self.attr_matches('.'.join(ids[::-1]))
1265 argMatches = []
1306 argMatches = []
1266 for callableMatch in callableMatches:
1307 for callableMatch in callableMatches:
1267 try:
1308 try:
1268 namedArgs = self._default_arguments(eval(callableMatch,
1309 namedArgs = self._default_arguments(eval(callableMatch,
1269 self.namespace))
1310 self.namespace))
1270 except:
1311 except:
1271 continue
1312 continue
1272
1313
1273 # Remove used named arguments from the list, no need to show twice
1314 # Remove used named arguments from the list, no need to show twice
1274 for namedArg in set(namedArgs) - usedNamedArgs:
1315 for namedArg in set(namedArgs) - usedNamedArgs:
1275 if namedArg.startswith(text):
1316 if namedArg.startswith(text):
1276 argMatches.append(u"%s=" %namedArg)
1317 argMatches.append(u"%s=" %namedArg)
1277 return argMatches
1318 return argMatches
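# Hedged sketch that pokes the completer's internal ``text_until_cursor``
# state directly, purely for illustration; assumes the user has defined
# ``def foo(a, b=1, *, verbose=False): pass`` and typed ``foo(a=1, b``:
#
#   >>> ip.Completer.text_until_cursor = 'foo(a=1, b'   # doctest: +SKIP
#   >>> ip.Completer.python_func_kw_matches('b')        # doctest: +SKIP
#   ['b=']                       # 'a=' is omitted because it is already used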
1278
1319
1279 def dict_key_matches(self, text):
1320 def dict_key_matches(self, text):
1280 "Match string keys in a dictionary, after e.g. 'foo[' "
1321 "Match string keys in a dictionary, after e.g. 'foo[' "
1281 def get_keys(obj):
1322 def get_keys(obj):
1282 # Objects can define their own completions by defining an
1323 # Objects can define their own completions by defining an
1283 # _ipython_key_completions_() method.
1324 # _ipython_key_completions_() method.
1284 method = get_real_method(obj, '_ipython_key_completions_')
1325 method = get_real_method(obj, '_ipython_key_completions_')
1285 if method is not None:
1326 if method is not None:
1286 return method()
1327 return method()
1287
1328
1288 # Special case some common in-memory dict-like types
1329 # Special case some common in-memory dict-like types
1289 if isinstance(obj, dict) or\
1330 if isinstance(obj, dict) or\
1290 _safe_isinstance(obj, 'pandas', 'DataFrame'):
1331 _safe_isinstance(obj, 'pandas', 'DataFrame'):
1291 try:
1332 try:
1292 return list(obj.keys())
1333 return list(obj.keys())
1293 except Exception:
1334 except Exception:
1294 return []
1335 return []
1295 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
1336 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
1296 _safe_isinstance(obj, 'numpy', 'void'):
1337 _safe_isinstance(obj, 'numpy', 'void'):
1297 return obj.dtype.names or []
1338 return obj.dtype.names or []
1298 return []
1339 return []
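# Hedged illustration of the hook consulted above: any object can opt into
# key completion by implementing ``_ipython_key_completions_``. ``ColumnStore``
# is a made-up class shown only to demonstrate the protocol:
#
#   class ColumnStore:
#       def __init__(self, columns):
#           self._columns = dict(columns)
#       def __getitem__(self, key):
#           return self._columns[key]
#       def _ipython_key_completions_(self):
#           # any iterable of strings is fine; offered after ``store[``
#           return list(self._columns)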
1299
1340
1300 try:
1341 try:
1301 regexps = self.__dict_key_regexps
1342 regexps = self.__dict_key_regexps
1302 except AttributeError:
1343 except AttributeError:
1303 dict_key_re_fmt = r'''(?x)
1344 dict_key_re_fmt = r'''(?x)
1304 ( # match dict-referring expression wrt greedy setting
1345 ( # match dict-referring expression wrt greedy setting
1305 %s
1346 %s
1306 )
1347 )
1307 \[ # open bracket
1348 \[ # open bracket
1308 \s* # and optional whitespace
1349 \s* # and optional whitespace
1309 ([uUbB]? # string prefix (r not handled)
1350 ([uUbB]? # string prefix (r not handled)
1310 (?: # unclosed string
1351 (?: # unclosed string
1311 '(?:[^']|(?<!\\)\\')*
1352 '(?:[^']|(?<!\\)\\')*
1312 |
1353 |
1313 "(?:[^"]|(?<!\\)\\")*
1354 "(?:[^"]|(?<!\\)\\")*
1314 )
1355 )
1315 )?
1356 )?
1316 $
1357 $
1317 '''
1358 '''
1318 regexps = self.__dict_key_regexps = {
1359 regexps = self.__dict_key_regexps = {
1319 False: re.compile(dict_key_re_fmt % '''
1360 False: re.compile(dict_key_re_fmt % '''
1320 # identifiers separated by .
1361 # identifiers separated by .
1321 (?!\d)\w+
1362 (?!\d)\w+
1322 (?:\.(?!\d)\w+)*
1363 (?:\.(?!\d)\w+)*
1323 '''),
1364 '''),
1324 True: re.compile(dict_key_re_fmt % '''
1365 True: re.compile(dict_key_re_fmt % '''
1325 .+
1366 .+
1326 ''')
1367 ''')
1327 }
1368 }
1328
1369
1329 match = regexps[self.greedy].search(self.text_until_cursor)
1370 match = regexps[self.greedy].search(self.text_until_cursor)
1330 if match is None:
1371 if match is None:
1331 return []
1372 return []
1332
1373
1333 expr, prefix = match.groups()
1374 expr, prefix = match.groups()
1334 try:
1375 try:
1335 obj = eval(expr, self.namespace)
1376 obj = eval(expr, self.namespace)
1336 except Exception:
1377 except Exception:
1337 try:
1378 try:
1338 obj = eval(expr, self.global_namespace)
1379 obj = eval(expr, self.global_namespace)
1339 except Exception:
1380 except Exception:
1340 return []
1381 return []
1341
1382
1342 keys = get_keys(obj)
1383 keys = get_keys(obj)
1343 if not keys:
1384 if not keys:
1344 return keys
1385 return keys
1345 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
1386 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
1346 if not matches:
1387 if not matches:
1347 return matches
1388 return matches
1348
1389
1349 # get the cursor position of
1390 # get the cursor position of
1350 # - the text being completed
1391 # - the text being completed
1351 # - the start of the key text
1392 # - the start of the key text
1352 # - the start of the completion
1393 # - the start of the completion
1353 text_start = len(self.text_until_cursor) - len(text)
1394 text_start = len(self.text_until_cursor) - len(text)
1354 if prefix:
1395 if prefix:
1355 key_start = match.start(2)
1396 key_start = match.start(2)
1356 completion_start = key_start + token_offset
1397 completion_start = key_start + token_offset
1357 else:
1398 else:
1358 key_start = completion_start = match.end()
1399 key_start = completion_start = match.end()
1359
1400
1360 # grab the leading prefix, to make sure all completions start with `text`
1401 # grab the leading prefix, to make sure all completions start with `text`
1361 if text_start > key_start:
1402 if text_start > key_start:
1362 leading = ''
1403 leading = ''
1363 else:
1404 else:
1364 leading = text[text_start:completion_start]
1405 leading = text[text_start:completion_start]
1365
1406
1366 # the index of the `[` character
1407 # the index of the `[` character
1367 bracket_idx = match.end(1)
1408 bracket_idx = match.end(1)
1368
1409
1369 # append closing quote and bracket as appropriate
1410 # append closing quote and bracket as appropriate
1370 # this is *not* appropriate if the opening quote or bracket is outside
1411 # this is *not* appropriate if the opening quote or bracket is outside
1371 # the text given to this method
1412 # the text given to this method
1372 suf = ''
1413 suf = ''
1373 continuation = self.line_buffer[len(self.text_until_cursor):]
1414 continuation = self.line_buffer[len(self.text_until_cursor):]
1374 if key_start > text_start and closing_quote:
1415 if key_start > text_start and closing_quote:
1375 # quotes were opened inside text, maybe close them
1416 # quotes were opened inside text, maybe close them
1376 if continuation.startswith(closing_quote):
1417 if continuation.startswith(closing_quote):
1377 continuation = continuation[len(closing_quote):]
1418 continuation = continuation[len(closing_quote):]
1378 else:
1419 else:
1379 suf += closing_quote
1420 suf += closing_quote
1380 if bracket_idx > text_start:
1421 if bracket_idx > text_start:
1381 # brackets were opened inside text, maybe close them
1422 # brackets were opened inside text, maybe close them
1382 if not continuation.startswith(']'):
1423 if not continuation.startswith(']'):
1383 suf += ']'
1424 suf += ']'
1384
1425
1385 return [leading + k + suf for k in matches]
1426 return [leading + k + suf for k in matches]
1386
1427
1387 def unicode_name_matches(self, text):
1428 def unicode_name_matches(self, text):
1388 u"""Match Latex-like syntax for unicode characters based
1429 u"""Match Latex-like syntax for unicode characters based
1389 on the name of the character.
1430 on the name of the character.
1390
1431
1391 This does ``\\GREEK SMALL LETTER ETA`` -> ``Ξ·``
1432 This does ``\\GREEK SMALL LETTER ETA`` -> ``Ξ·``
1392
1433
1393 Works only on valid Python 3 identifiers, or on combining characters that
1434 Works only on valid Python 3 identifiers, or on combining characters that
1394 will combine to form a valid identifier.
1435 will combine to form a valid identifier.
1395
1436
1396 Used on Python 3 only.
1437 Used on Python 3 only.
1397 """
1438 """
1398 slashpos = text.rfind('\\')
1439 slashpos = text.rfind('\\')
1399 if slashpos > -1:
1440 if slashpos > -1:
1400 s = text[slashpos+1:]
1441 s = text[slashpos+1:]
1401 try :
1442 try :
1402 unic = unicodedata.lookup(s)
1443 unic = unicodedata.lookup(s)
1403 # allow combining chars
1444 # allow combining chars
1404 if ('a'+unic).isidentifier():
1445 if ('a'+unic).isidentifier():
1405 return '\\'+s,[unic]
1446 return '\\'+s,[unic]
1406 except KeyError:
1447 except KeyError:
1407 pass
1448 pass
1408 return u'', []
1449 return u'', []
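# Hedged doctest of the lookup above; relies only on the standard-library
# ``unicodedata.lookup`` (``completer`` stands for an IPCompleter instance):
#
#   >>> completer.unicode_name_matches('\\GREEK SMALL LETTER ETA')  # doctest: +SKIP
#   ('\\GREEK SMALL LETTER ETA', ['Ξ·'])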
1409
1450
1410
1451
1411 def latex_matches(self, text):
1452 def latex_matches(self, text):
1412 u"""Match Latex syntax for unicode characters.
1453 u"""Match Latex syntax for unicode characters.
1413
1454
1414 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``Ξ±``
1455 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``Ξ±``
1415
1456
1416 Used on Python 3 only.
1457 Used on Python 3 only.
1417 """
1458 """
1418 slashpos = text.rfind('\\')
1459 slashpos = text.rfind('\\')
1419 if slashpos > -1:
1460 if slashpos > -1:
1420 s = text[slashpos:]
1461 s = text[slashpos:]
1421 if s in latex_symbols:
1462 if s in latex_symbols:
1422 # Try to complete a full latex symbol to unicode
1463 # Try to complete a full latex symbol to unicode
1423 # \\alpha -> Ξ±
1464 # \\alpha -> Ξ±
1424 return s, [latex_symbols[s]]
1465 return s, [latex_symbols[s]]
1425 else:
1466 else:
1426 # If a user has partially typed a latex symbol, give them
1467 # If a user has partially typed a latex symbol, give them
1427 # a full list of options \al -> [\aleph, \alpha]
1468 # a full list of options \al -> [\aleph, \alpha]
1428 matches = [k for k in latex_symbols if k.startswith(s)]
1469 matches = [k for k in latex_symbols if k.startswith(s)]
1429 return s, matches
1470 return s, matches
1430 return u'', []
1471 return u'', []
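# Hedged doctest; assumes ``\alpha`` is present in the bundled
# ``latex_symbols`` table (it is in current IPython releases):
#
#   >>> completer.latex_matches('\\alpha')             # doctest: +SKIP
#   ('\\alpha', ['Ξ±'])
#   >>> _, matches = completer.latex_matches('\\al')   # doctest: +SKIP
#   >>> '\\alpha' in matches                           # doctest: +SKIP
#   True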
1431
1472
1432 def dispatch_custom_completer(self, text):
1473 def dispatch_custom_completer(self, text):
1433 if not self.custom_completers:
1474 if not self.custom_completers:
1434 return
1475 return
1435
1476
1436 line = self.line_buffer
1477 line = self.line_buffer
1437 if not line.strip():
1478 if not line.strip():
1438 return None
1479 return None
1439
1480
1440 # Create a little structure to pass all the relevant information about
1481 # Create a little structure to pass all the relevant information about
1441 # the current completion to any custom completer.
1482 # the current completion to any custom completer.
1442 event = SimpleNamespace()
1483 event = SimpleNamespace()
1443 event.line = line
1484 event.line = line
1444 event.symbol = text
1485 event.symbol = text
1445 cmd = line.split(None,1)[0]
1486 cmd = line.split(None,1)[0]
1446 event.command = cmd
1487 event.command = cmd
1447 event.text_until_cursor = self.text_until_cursor
1488 event.text_until_cursor = self.text_until_cursor
1448
1489
1449 # for foo etc, try also to find completer for %foo
1490 # for foo etc, try also to find completer for %foo
1450 if not cmd.startswith(self.magic_escape):
1491 if not cmd.startswith(self.magic_escape):
1451 try_magic = self.custom_completers.s_matches(
1492 try_magic = self.custom_completers.s_matches(
1452 self.magic_escape + cmd)
1493 self.magic_escape + cmd)
1453 else:
1494 else:
1454 try_magic = []
1495 try_magic = []
1455
1496
1456 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1497 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1457 try_magic,
1498 try_magic,
1458 self.custom_completers.flat_matches(self.text_until_cursor)):
1499 self.custom_completers.flat_matches(self.text_until_cursor)):
1459 try:
1500 try:
1460 res = c(event)
1501 res = c(event)
1461 if res:
1502 if res:
1462 # first, try case sensitive match
1503 # first, try case sensitive match
1463 withcase = [cast_unicode_py2(r) for r in res if r.startswith(text)]
1504 withcase = [cast_unicode_py2(r) for r in res if r.startswith(text)]
1464 if withcase:
1505 if withcase:
1465 return withcase
1506 return withcase
1466 # if none, then case insensitive ones are ok too
1507 # if none, then case insensitive ones are ok too
1467 text_low = text.lower()
1508 text_low = text.lower()
1468 return [cast_unicode_py2(r) for r in res if r.lower().startswith(text_low)]
1509 return [cast_unicode_py2(r) for r in res if r.lower().startswith(text_low)]
1469 except TryNext:
1510 except TryNext:
1470 pass
1511 pass
1471
1512
1472 return None
1513 return None
1473
1514
1474 def completions(self, text: str, offset: int)->Iterator[Completion]:
1515 def completions(self, text: str, offset: int)->Iterator[Completion]:
1475 """
1516 """
1476 Returns an iterator over the possible completions
1517 Returns an iterator over the possible completions
1477
1518
1478 .. warning:: Unstable
1519 .. warning:: Unstable
1479
1520
1480 This function is unstable, API may change without warning.
1521 This function is unstable, API may change without warning.
1481 It will also raise unless used in the proper context manager.
1522 It will also raise unless used in the proper context manager.
1482
1523
1483 Parameters
1524 Parameters
1484 ----------
1525 ----------
1485
1526
1486 text:str
1527 text:str
1487 Full text of the current input, as a multi-line string.
1528 Full text of the current input, as a multi-line string.
1488 offset:int
1529 offset:int
1489 Integer representing the position of the cursor in ``text``. Offset
1530 Integer representing the position of the cursor in ``text``. Offset
1490 is 0-based.
1531 is 0-based.
1491
1532
1492 Yields
1533 Yields
1493 ------
1534 ------
1494 :any:`Completion` object
1535 :any:`Completion` object
1495
1536
1496
1537
1497 The cursor on a text can either be seen as being "in between"
1538 The cursor on a text can either be seen as being "in between"
1498 characters or "on" a character, depending on the interface visible to
1539 characters or "on" a character, depending on the interface visible to
1499 the user. For consistency, the cursor being "in between" characters X
1540 the user. For consistency, the cursor being "in between" characters X
1500 and Y is equivalent to the cursor being "on" character Y, that is to say
1541 and Y is equivalent to the cursor being "on" character Y, that is to say
1501 the character the cursor is on is considered as being after the cursor.
1542 the character the cursor is on is considered as being after the cursor.
1502
1543
1503 Combining characters may span more than one position in the
1544 Combining characters may span more than one position in the
1504 text.
1545 text.
1505
1546
1506
1547
1507 .. note::
1548 .. note::
1508
1549
1509 If ``IPCompleter.debug`` is :any:`True`, this will yield a ``--jedi/ipython--``
1550 If ``IPCompleter.debug`` is :any:`True`, this will yield a ``--jedi/ipython--``
1510 fake Completion token to distinguish completions returned by Jedi
1551 fake Completion token to distinguish completions returned by Jedi
1511 from the usual IPython completions.
1552 from the usual IPython completions.
1512
1553
1554 .. note::
1555
1556 Completions are not completely deduplicated yet. If identical
1557 completions are coming from different sources this function does not
1558 ensure that each completion object will only be present once.
1513 """
1559 """
1514 warnings.warn("_complete is a provisional API (as of IPython 6.0). "
1560 warnings.warn("_complete is a provisional API (as of IPython 6.0). "
1515 "It may change without warning. "
1561 "It may change without warning. "
1516 "Use it in the corresponding context manager.",
1562 "Use it in the corresponding context manager.",
1517 category=ProvisionalCompleterWarning, stacklevel=2)
1563 category=ProvisionalCompleterWarning, stacklevel=2)
1518
1564
1519 # Possible Improvements / Known limitation
1520 ##########################################
1521 # Completions may be identical even if they have different ranges and
1522 # text. For example:
1523 # >>> a=1
1524 # >>> a.<tab>
1525 # May returns:
1526 # - `a.real` from 0 to 2
1527 # - `.real` from 1 to 2
1528 # the current code does not (yet) check for such equivalence
1529 seen = set()
1565 seen = set()
1530 for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
1566 for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
1531 if c and (c in seen):
1567 if c and (c in seen):
1532 continue
1568 continue
1533 yield c
1569 yield c
1534 seen.add(c)
1570 seen.add(c)
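# Hedged usage sketch: the provisional API must be used inside the
# ``provisionalcompleter`` context manager, as the warning above enforces;
# the resulting completion text is indicative only:
#
#   >>> from IPython.core.completer import provisionalcompleter
#   >>> ip = get_ipython()
#   >>> with provisionalcompleter():                               # doctest: +SKIP
#   ...     comps = list(ip.Completer.completions('list.ap', 7))
#   >>> any(c.text.endswith('append') for c in comps)              # doctest: +SKIP
#   True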
1535
1571
1536 def _completions(self, full_text: str, offset: int, *, _timeout)->Iterator[Completion]:
1572 def _completions(self, full_text: str, offset: int, *, _timeout)->Iterator[Completion]:
1537 """
1573 """
1538 Core completion routine. Same signature as :any:`completions`, with the
1574 Core completion routine. Same signature as :any:`completions`, with the
1539 extra ``_timeout`` parameter (in seconds).
1575 extra ``_timeout`` parameter (in seconds).
1540
1576
1541
1577
1542 Computing jedi's completion ``.type`` can be quite expensive (it is a
1578 Computing jedi's completion ``.type`` can be quite expensive (it is a
1543 lazy property) and can require some warm-up, more warm-up than just
1579 lazy property) and can require some warm-up, more warm-up than just
1544 computing the ``name`` of a completion. The warm-up can be:
1580 computing the ``name`` of a completion. The warm-up can be:
1545
1581
1546 - Long warm-up the first time a module is encountered after
1582 - Long warm-up the first time a module is encountered after
1547 install/update: actually build the parse/inference tree.
1583 install/update: actually build the parse/inference tree.
1548
1584
1549 - first time the module is encountered in a session: load tree from
1585 - first time the module is encountered in a session: load tree from
1550 disk.
1586 disk.
1551
1587
1552 We don't want to block completions for tens of seconds so we give the
1588 We don't want to block completions for tens of seconds so we give the
1553 completer a "budget" of ``_timeout`` seconds per invocation to compute
1589 completer a "budget" of ``_timeout`` seconds per invocation to compute
1554 completion types; the completions that have not yet been computed will
1590 completion types; the completions that have not yet been computed will
1555 be marked as "unknown" and will have a chance to be computed next round
1591 be marked as "unknown" and will have a chance to be computed next round
1556 as things get cached.
1592 as things get cached.
1557
1593
1558 Keep in mind that Jedi is not the only thing treating the completion, so
1594 Keep in mind that Jedi is not the only thing treating the completion, so
1559 keep the timeout short-ish: if we take more than 0.3 seconds we still
1595 keep the timeout short-ish: if we take more than 0.3 seconds we still
1560 have lots of processing to do.
1596 have lots of processing to do.
1561
1597
1562 """
1598 """
1563 deadline = time.monotonic() + _timeout
1599 deadline = time.monotonic() + _timeout
1564
1600
1565
1601
1566 before = full_text[:offset]
1602 before = full_text[:offset]
1567 cursor_line, cursor_column = position_to_cursor(full_text, offset)
1603 cursor_line, cursor_column = position_to_cursor(full_text, offset)
1568
1604
1569 matched_text, matches, matches_origin, jedi_matches = self._complete(
1605 matched_text, matches, matches_origin, jedi_matches = self._complete(
1570 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column)
1606 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column)
1571
1607
1572 iter_jm = iter(jedi_matches)
1608 iter_jm = iter(jedi_matches)
1573 if _timeout:
1609 if _timeout:
1574 for jm in iter_jm:
1610 for jm in iter_jm:
1575 try:
1611 try:
1576 type_ = jm.type
1612 type_ = jm.type
1577 except Exception:
1613 except Exception:
1578 if self.debug:
1614 if self.debug:
1579 print("Error in Jedi getting type of ", jm)
1615 print("Error in Jedi getting type of ", jm)
1580 type_ = None
1616 type_ = None
1581 delta = len(jm.name_with_symbols) - len(jm.complete)
1617 delta = len(jm.name_with_symbols) - len(jm.complete)
1582 yield Completion(start=offset - delta,
1618 yield Completion(start=offset - delta,
1583 end=offset,
1619 end=offset,
1584 text=jm.name_with_symbols,
1620 text=jm.name_with_symbols,
1585 type=type_,
1621 type=type_,
1586 _origin='jedi')
1622 _origin='jedi')
1587
1623
1588 if time.monotonic() > deadline:
1624 if time.monotonic() > deadline:
1589 break
1625 break
1590
1626
1591 for jm in iter_jm:
1627 for jm in iter_jm:
1592 delta = len(jm.name_with_symbols) - len(jm.complete)
1628 delta = len(jm.name_with_symbols) - len(jm.complete)
1593 yield Completion(start=offset - delta,
1629 yield Completion(start=offset - delta,
1594 end=offset,
1630 end=offset,
1595 text=jm.name_with_symbols,
1631 text=jm.name_with_symbols,
1596 type='<unknown>', # don't compute type for speed
1632 type='<unknown>', # don't compute type for speed
1597 _origin='jedi')
1633 _origin='jedi')
1598
1634
1599
1635
1600 start_offset = before.rfind(matched_text)
1636 start_offset = before.rfind(matched_text)
1601
1637
1602 # TODO:
1638 # TODO:
1603 # Suppress this, right now just for debug.
1639 # Suppress this, right now just for debug.
1604 if jedi_matches and matches and self.debug:
1640 if jedi_matches and matches and self.debug:
1605 yield Completion(start=start_offset, end=offset, text='--jedi/ipython--', _origin='debug')
1641 yield Completion(start=start_offset, end=offset, text='--jedi/ipython--', _origin='debug')
1606
1642
1607 # I'm unsure if this is always true, so let's assert and see if it
1643 # I'm unsure if this is always true, so let's assert and see if it
1608 # crashes
1644 # crashes
1609 assert before.endswith(matched_text)
1645 assert before.endswith(matched_text)
1610 for m, t in zip(matches, matches_origin):
1646 for m, t in zip(matches, matches_origin):
1611 yield Completion(start=start_offset, end=offset, text=m, _origin=t)
1647 yield Completion(start=start_offset, end=offset, text=m, _origin=t)
1612
1648
1613
1649
1614 def complete(self, text=None, line_buffer=None, cursor_pos=None):
1650 def complete(self, text=None, line_buffer=None, cursor_pos=None):
1615 """Find completions for the given text and line context.
1651 """Find completions for the given text and line context.
1616
1652
1617 Note that both the text and the line_buffer are optional, but at least
1653 Note that both the text and the line_buffer are optional, but at least
1618 one of them must be given.
1654 one of them must be given.
1619
1655
1620 Parameters
1656 Parameters
1621 ----------
1657 ----------
1622 text : string, optional
1658 text : string, optional
1623 Text to perform the completion on. If not given, the line buffer
1659 Text to perform the completion on. If not given, the line buffer
1624 is split using the instance's CompletionSplitter object.
1660 is split using the instance's CompletionSplitter object.
1625
1661
1626 line_buffer : string, optional
1662 line_buffer : string, optional
1627 If not given, the completer attempts to obtain the current line
1663 If not given, the completer attempts to obtain the current line
1628 buffer via readline. This keyword allows clients which are
1664 buffer via readline. This keyword allows clients which are
1629 requesting for text completions in non-readline contexts to inform
1665 requesting for text completions in non-readline contexts to inform
1630 the completer of the entire text.
1666 the completer of the entire text.
1631
1667
1632 cursor_pos : int, optional
1668 cursor_pos : int, optional
1633 Index of the cursor in the full line buffer. Should be provided by
1669 Index of the cursor in the full line buffer. Should be provided by
1634 remote frontends where kernel has no access to frontend state.
1670 remote frontends where kernel has no access to frontend state.
1635
1671
1636 Returns
1672 Returns
1637 -------
1673 -------
1638 text : str
1674 text : str
1639 Text that was actually used in the completion.
1675 Text that was actually used in the completion.
1640
1676
1641 matches : list
1677 matches : list
1642 A list of completion matches.
1678 A list of completion matches.
1643
1679
1644
1680
1645 .. note::
1681 .. note::
1646
1682
1647 This API is likely to be deprecated and replaced by
1683 This API is likely to be deprecated and replaced by
1648 :any:`IPCompleter.completions` in the future.
1684 :any:`IPCompleter.completions` in the future.
1649
1685
1650
1686
1651 """
1687 """
1652 warnings.warn('`Completer.complete` is pending deprecation since '
1688 warnings.warn('`Completer.complete` is pending deprecation since '
1653 'IPython 6.0 and will be replaced by `Completer.completions`.',
1689 'IPython 6.0 and will be replaced by `Completer.completions`.',
1654 PendingDeprecationWarning)
1690 PendingDeprecationWarning)
1655 # potential todo, FOLD the 3rd throw away argument of _complete
1691 # potential todo, FOLD the 3rd throw away argument of _complete
1656 # into the first two.
1692 # into the first two.
1657 return self._complete(line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0)[:2]
1693 return self._complete(line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0)[:2]
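# Hedged sketch of the legacy (pending-deprecation) entry point above, which
# returns a ``(text, matches)`` pair:
#
#   >>> text, matches = ip.Completer.complete(text='pri')  # doctest: +SKIP
#   >>> 'print' in matches                                 # doctest: +SKIP
#   True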
1658
1694
1659 def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
1695 def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
1660 full_text=None, return_jedi_results=True) -> (str, List[str], List[object]):
1696 full_text=None, return_jedi_results=True) -> (str, List[str], List[object]):
1661 """
1697 """
1662
1698
1663 Like complete but can also return raw Jedi completions as well as the
1699 Like complete but can also return raw Jedi completions as well as the
1664 origin of the completion text. This could (and should) be made much
1700 origin of the completion text. This could (and should) be made much
1665 cleaner but that will be simpler once we drop the old (and stateful)
1701 cleaner but that will be simpler once we drop the old (and stateful)
1666 :any:`complete` API.
1702 :any:`complete` API.
1667
1703
1668
1704
1669 With the current provisional API, cursor_pos acts both (depending on the
1705 With the current provisional API, cursor_pos acts both (depending on the
1670 caller) as the offset in the ``text`` or ``line_buffer``, or as the
1706 caller) as the offset in the ``text`` or ``line_buffer``, or as the
1671 ``column`` when passing multiline strings; this could/should be renamed,
1707 ``column`` when passing multiline strings; this could/should be renamed,
1672 but would add extra noise.
1708 but would add extra noise.
1673 """
1709 """
1674
1710
1675 # if the cursor position isn't given, the only sane assumption we can
1711 # if the cursor position isn't given, the only sane assumption we can
1676 # make is that it's at the end of the line (the common case)
1712 # make is that it's at the end of the line (the common case)
1677 if cursor_pos is None:
1713 if cursor_pos is None:
1678 cursor_pos = len(line_buffer) if text is None else len(text)
1714 cursor_pos = len(line_buffer) if text is None else len(text)
1679
1715
1680 if self.use_main_ns:
1716 if self.use_main_ns:
1681 self.namespace = __main__.__dict__
1717 self.namespace = __main__.__dict__
1682
1718
1683 # if text is either None or an empty string, rely on the line buffer
1719 # if text is either None or an empty string, rely on the line buffer
1684 if (not line_buffer) and full_text:
1720 if (not line_buffer) and full_text:
1685 line_buffer = full_text.split('\n')[cursor_line]
1721 line_buffer = full_text.split('\n')[cursor_line]
1686 if not text:
1722 if not text:
1687 text = self.splitter.split_line(line_buffer, cursor_pos)
1723 text = self.splitter.split_line(line_buffer, cursor_pos)
1688
1724
1689 base_text = text if not line_buffer else line_buffer[:cursor_pos]
1725 base_text = text if not line_buffer else line_buffer[:cursor_pos]
1690 latex_text, latex_matches = self.latex_matches(base_text)
1726 latex_text, latex_matches = self.latex_matches(base_text)
1691 if latex_matches:
1727 if latex_matches:
1692 return latex_text, latex_matches, ['latex_matches']*len(latex_matches), ()
1728 return latex_text, latex_matches, ['latex_matches']*len(latex_matches), ()
1693 name_text = ''
1729 name_text = ''
1694 name_matches = []
1730 name_matches = []
1695 for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
1731 for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
1696 name_text, name_matches = meth(base_text)
1732 name_text, name_matches = meth(base_text)
1697 if name_text:
1733 if name_text:
1698 return name_text, name_matches, [meth.__qualname__]*len(name_matches), {}
1734 return name_text, name_matches, [meth.__qualname__]*len(name_matches), {}
1699
1735
1700
1736
1701 # If no line buffer is given, assume the input text is all there was
1737 # If no line buffer is given, assume the input text is all there was
1702 if line_buffer is None:
1738 if line_buffer is None:
1703 line_buffer = text
1739 line_buffer = text
1704
1740
1705 self.line_buffer = line_buffer
1741 self.line_buffer = line_buffer
1706 self.text_until_cursor = self.line_buffer[:cursor_pos]
1742 self.text_until_cursor = self.line_buffer[:cursor_pos]
1707
1743
1708 # Start with a clean slate of completions
1744 # Start with a clean slate of completions
1709 matches = []
1745 matches = []
1710 custom_res = self.dispatch_custom_completer(text)
1746 custom_res = self.dispatch_custom_completer(text)
1711 # FIXME: we should extend our api to return a dict with completions for
1747 # FIXME: we should extend our api to return a dict with completions for
1712 # different types of objects. The rlcomplete() method could then
1748 # different types of objects. The rlcomplete() method could then
1713 # simply collapse the dict into a list for readline, but we'd have
1749 # simply collapse the dict into a list for readline, but we'd have
1714 # richer completion semantics in other environments.
1750 # richer completion semantics in other environments.
1715 completions = ()
1751 completions = ()
1716 if self.use_jedi and return_jedi_results:
1752 if self.use_jedi and return_jedi_results:
1717 if not full_text:
1753 if not full_text:
1718 full_text = line_buffer
1754 full_text = line_buffer
1719 completions = self._jedi_matches(
1755 completions = self._jedi_matches(
1720 cursor_pos, cursor_line, full_text)
1756 cursor_pos, cursor_line, full_text)
1721 if custom_res is not None:
1757 if custom_res is not None:
1722 # did custom completers produce something?
1758 # did custom completers produce something?
1723 matches = [(m, 'custom') for m in custom_res]
1759 matches = [(m, 'custom') for m in custom_res]
1724 else:
1760 else:
1725 # Extend the list of completions with the results of each
1761 # Extend the list of completions with the results of each
1726 # matcher, so we return results to the user from all
1762 # matcher, so we return results to the user from all
1727 # namespaces.
1763 # namespaces.
1728 if self.merge_completions:
1764 if self.merge_completions:
1729 matches = []
1765 matches = []
1730 for matcher in self.matchers:
1766 for matcher in self.matchers:
1731 try:
1767 try:
1732 matches.extend([(m, matcher.__qualname__)
1768 matches.extend([(m, matcher.__qualname__)
1733 for m in matcher(text)])
1769 for m in matcher(text)])
1734 except:
1770 except:
1735 # Show the ugly traceback if the matcher causes an
1771 # Show the ugly traceback if the matcher causes an
1736 # exception, but do NOT crash the kernel!
1772 # exception, but do NOT crash the kernel!
1737 sys.excepthook(*sys.exc_info())
1773 sys.excepthook(*sys.exc_info())
1738 else:
1774 else:
1739 for matcher in self.matchers:
1775 for matcher in self.matchers:
1740 matches = [(m, matcher.__qualname__)
1776 matches = [(m, matcher.__qualname__)
1741 for m in matcher(text)]
1777 for m in matcher(text)]
1742 if matches:
1778 if matches:
1743 break
1779 break
1744 seen = set()
1780 seen = set()
1745 filtered_matches = set()
1781 filtered_matches = set()
1746 for m in matches:
1782 for m in matches:
1747 t, c = m
1783 t, c = m
1748 if t not in seen:
1784 if t not in seen:
1749 filtered_matches.add(m)
1785 filtered_matches.add(m)
1750 seen.add(t)
1786 seen.add(t)
1751
1787
1752 filtered_matches = sorted(
1788 filtered_matches = sorted(
1753 set(filtered_matches), key=lambda x: completions_sorting_key(x[0]))
1789 set(filtered_matches), key=lambda x: completions_sorting_key(x[0]))
1754
1790
1755 matches = [m[0] for m in filtered_matches]
1791 matches = [m[0] for m in filtered_matches]
1756 origins = [m[1] for m in filtered_matches]
1792 origins = [m[1] for m in filtered_matches]
1757
1793
1758 self.matches = matches
1794 self.matches = matches
1759
1795
1760 return text, matches, origins, completions
1796 return text, matches, origins, completions
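For illustration, here is a minimal standalone sketch of the deduplication step above: matches arrive as (text, matcher-name) pairs, only the first occurrence of each text is kept, and the survivors are sorted. The matcher names and the plain sort key below are stand-ins; the real code sorts with completions_sorting_key.

def dedupe_with_origins(pairs):
    # pairs: iterable of (completion_text, matcher_name) tuples
    seen = set()
    kept = []
    for text, origin in pairs:
        if text not in seen:        # keep only the first occurrence of each text
            kept.append((text, origin))
            seen.add(text)
    kept.sort(key=lambda p: p[0])   # stand-in for completions_sorting_key
    return [t for t, _ in kept], [o for _, o in kept]

texts, origins = dedupe_with_origins([
    ('abs', 'IPCompleter.python_matches'),
    ('abs', 'IPCompleter.global_matches'),  # duplicate text, dropped
    ('all', 'IPCompleter.python_matches'),
])
# texts   == ['abs', 'all']
# origins == ['IPCompleter.python_matches', 'IPCompleter.python_matches']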
@@ -1,843 +1,860 b''
1 # encoding: utf-8
1 # encoding: utf-8
2 """Tests for the IPython tab-completion machinery."""
2 """Tests for the IPython tab-completion machinery."""
3
3
4 # Copyright (c) IPython Development Team.
4 # Copyright (c) IPython Development Team.
5 # Distributed under the terms of the Modified BSD License.
5 # Distributed under the terms of the Modified BSD License.
6
6
7 import os
7 import os
8 import sys
8 import sys
9 import textwrap
9 import unittest
10 import unittest
10
11
11 from contextlib import contextmanager
12 from contextlib import contextmanager
12
13
13 import nose.tools as nt
14 import nose.tools as nt
14
15
15 from traitlets.config.loader import Config
16 from traitlets.config.loader import Config
16 from IPython import get_ipython
17 from IPython import get_ipython
17 from IPython.core import completer
18 from IPython.core import completer
18 from IPython.external.decorators import knownfailureif
19 from IPython.external.decorators import knownfailureif
19 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
20 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
20 from IPython.utils.generics import complete_object
21 from IPython.utils.generics import complete_object
21 from IPython.testing import decorators as dec
22 from IPython.testing import decorators as dec
22
23
23 from IPython.core.completer import Completion, provisionalcompleter, match_dict_keys
24 from IPython.core.completer import (
25 Completion, provisionalcompleter, match_dict_keys, _deduplicate_completions)
24 from nose.tools import assert_in, assert_not_in
26 from nose.tools import assert_in, assert_not_in
25
27
26 #-----------------------------------------------------------------------------
28 #-----------------------------------------------------------------------------
27 # Test functions
29 # Test functions
28 #-----------------------------------------------------------------------------
30 #-----------------------------------------------------------------------------
29
31
30 @contextmanager
32 @contextmanager
31 def greedy_completion():
33 def greedy_completion():
32 ip = get_ipython()
34 ip = get_ipython()
33 greedy_original = ip.Completer.greedy
35 greedy_original = ip.Completer.greedy
34 try:
36 try:
35 ip.Completer.greedy = True
37 ip.Completer.greedy = True
36 yield
38 yield
37 finally:
39 finally:
38 ip.Completer.greedy = greedy_original
40 ip.Completer.greedy = greedy_original
39
41
40 def test_protect_filename():
42 def test_protect_filename():
41 if sys.platform == 'win32':
43 if sys.platform == 'win32':
42 pairs = [('abc','abc'),
44 pairs = [('abc','abc'),
43 (' abc','" abc"'),
45 (' abc','" abc"'),
44 ('a bc','"a bc"'),
46 ('a bc','"a bc"'),
45 ('a bc','"a bc"'),
47 ('a bc','"a bc"'),
46 (' bc','" bc"'),
48 (' bc','" bc"'),
47 ]
49 ]
48 else:
50 else:
49 pairs = [('abc','abc'),
51 pairs = [('abc','abc'),
50 (' abc',r'\ abc'),
52 (' abc',r'\ abc'),
51 ('a bc',r'a\ bc'),
53 ('a bc',r'a\ bc'),
52 ('a bc',r'a\ \ bc'),
54 ('a bc',r'a\ \ bc'),
53 (' bc',r'\ \ bc'),
55 (' bc',r'\ \ bc'),
54 # On posix, we also protect parens and other special characters.
56 # On posix, we also protect parens and other special characters.
55 ('a(bc',r'a\(bc'),
57 ('a(bc',r'a\(bc'),
56 ('a)bc',r'a\)bc'),
58 ('a)bc',r'a\)bc'),
57 ('a( )bc',r'a\(\ \)bc'),
59 ('a( )bc',r'a\(\ \)bc'),
58 ('a[1]bc', r'a\[1\]bc'),
60 ('a[1]bc', r'a\[1\]bc'),
59 ('a{1}bc', r'a\{1\}bc'),
61 ('a{1}bc', r'a\{1\}bc'),
60 ('a#bc', r'a\#bc'),
62 ('a#bc', r'a\#bc'),
61 ('a?bc', r'a\?bc'),
63 ('a?bc', r'a\?bc'),
62 ('a=bc', r'a\=bc'),
64 ('a=bc', r'a\=bc'),
63 ('a\\bc', r'a\\bc'),
65 ('a\\bc', r'a\\bc'),
64 ('a|bc', r'a\|bc'),
66 ('a|bc', r'a\|bc'),
65 ('a;bc', r'a\;bc'),
67 ('a;bc', r'a\;bc'),
66 ('a:bc', r'a\:bc'),
68 ('a:bc', r'a\:bc'),
67 ("a'bc", r"a\'bc"),
69 ("a'bc", r"a\'bc"),
68 ('a*bc', r'a\*bc'),
70 ('a*bc', r'a\*bc'),
69 ('a"bc', r'a\"bc'),
71 ('a"bc', r'a\"bc'),
70 ('a^bc', r'a\^bc'),
72 ('a^bc', r'a\^bc'),
71 ('a&bc', r'a\&bc'),
73 ('a&bc', r'a\&bc'),
72 ]
74 ]
73 # run the actual tests
75 # run the actual tests
74 for s1, s2 in pairs:
76 for s1, s2 in pairs:
75 s1p = completer.protect_filename(s1)
77 s1p = completer.protect_filename(s1)
76 nt.assert_equal(s1p, s2)
78 nt.assert_equal(s1p, s2)
77
79
78
80
79 def check_line_split(splitter, test_specs):
81 def check_line_split(splitter, test_specs):
80 for part1, part2, split in test_specs:
82 for part1, part2, split in test_specs:
81 cursor_pos = len(part1)
83 cursor_pos = len(part1)
82 line = part1+part2
84 line = part1+part2
83 out = splitter.split_line(line, cursor_pos)
85 out = splitter.split_line(line, cursor_pos)
84 nt.assert_equal(out, split)
86 nt.assert_equal(out, split)
85
87
86
88
87 def test_line_split():
89 def test_line_split():
88 """Basic line splitter test with default specs."""
90 """Basic line splitter test with default specs."""
89 sp = completer.CompletionSplitter()
91 sp = completer.CompletionSplitter()
90 # The format of the test specs is: part1, part2, expected answer. Parts 1
92 # The format of the test specs is: part1, part2, expected answer. Parts 1
91 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
93 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
92 # was at the end of part1. So an empty part2 represents someone hitting
94 # was at the end of part1. So an empty part2 represents someone hitting
93 # tab at the end of the line, the most common case.
95 # tab at the end of the line, the most common case.
94 t = [('run some/scrip', '', 'some/scrip'),
96 t = [('run some/scrip', '', 'some/scrip'),
95 ('run scripts/er', 'ror.py foo', 'scripts/er'),
97 ('run scripts/er', 'ror.py foo', 'scripts/er'),
96 ('echo $HOM', '', 'HOM'),
98 ('echo $HOM', '', 'HOM'),
97 ('print sys.pa', '', 'sys.pa'),
99 ('print sys.pa', '', 'sys.pa'),
98 ('print(sys.pa', '', 'sys.pa'),
100 ('print(sys.pa', '', 'sys.pa'),
99 ("execfile('scripts/er", '', 'scripts/er'),
101 ("execfile('scripts/er", '', 'scripts/er'),
100 ('a[x.', '', 'x.'),
102 ('a[x.', '', 'x.'),
101 ('a[x.', 'y', 'x.'),
103 ('a[x.', 'y', 'x.'),
102 ('cd "some_file/', '', 'some_file/'),
104 ('cd "some_file/', '', 'some_file/'),
103 ]
105 ]
104 check_line_split(sp, t)
106 check_line_split(sp, t)
105 # Ensure splitting works OK with unicode by re-running the tests with
107 # Ensure splitting works OK with unicode by re-running the tests with
106 # all inputs turned into unicode
108 # all inputs turned into unicode
107 check_line_split(sp, [ map(str, p) for p in t] )
109 check_line_split(sp, [ map(str, p) for p in t] )
108
110
109
111
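A quick interactive illustration of what the specs above encode; the call mirrors the first spec, with the cursor placed at the end of part1:

from IPython.core.completer import CompletionSplitter

sp = CompletionSplitter()
print(sp.split_line('run some/scrip', 14))  # -> 'some/scrip'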
110 def test_custom_completion_error():
112 def test_custom_completion_error():
111 """Test that errors from custom attribute completers are silenced."""
113 """Test that errors from custom attribute completers are silenced."""
112 ip = get_ipython()
114 ip = get_ipython()
113 class A(object): pass
115 class A(object): pass
114 ip.user_ns['a'] = A()
116 ip.user_ns['a'] = A()
115
117
116 @complete_object.when_type(A)
118 @complete_object.when_type(A)
117 def complete_A(a, existing_completions):
119 def complete_A(a, existing_completions):
118 raise TypeError("this should be silenced")
120 raise TypeError("this should be silenced")
119
121
120 ip.complete("a.")
122 ip.complete("a.")
121
123
122
124
123 def test_unicode_completions():
125 def test_unicode_completions():
124 ip = get_ipython()
126 ip = get_ipython()
125 # Some strings that trigger different types of completion. Check them both
127 # Some strings that trigger different types of completion. Check them both
126 # in str and unicode forms
128 # in str and unicode forms
127 s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
129 s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
128 for t in s + list(map(str, s)):
130 for t in s + list(map(str, s)):
129 # We don't need to check exact completion values (they may change
131 # We don't need to check exact completion values (they may change
130 # depending on the state of the namespace, but at least no exceptions
132 # depending on the state of the namespace, but at least no exceptions
131 # should be thrown and the return value should be a pair of text, list
133 # should be thrown and the return value should be a pair of text, list
132 # values.
134 # values.
133 text, matches = ip.complete(t)
135 text, matches = ip.complete(t)
134 nt.assert_true(isinstance(text, str))
136 nt.assert_true(isinstance(text, str))
135 nt.assert_true(isinstance(matches, list))
137 nt.assert_true(isinstance(matches, list))
136
138
137 def test_latex_completions():
139 def test_latex_completions():
138 from IPython.core.latex_symbols import latex_symbols
140 from IPython.core.latex_symbols import latex_symbols
139 import random
141 import random
140 ip = get_ipython()
142 ip = get_ipython()
141 # Test some random unicode symbols
143 # Test some random unicode symbols
142 keys = random.sample(latex_symbols.keys(), 10)
144 keys = random.sample(latex_symbols.keys(), 10)
143 for k in keys:
145 for k in keys:
144 text, matches = ip.complete(k)
146 text, matches = ip.complete(k)
145 nt.assert_equal(len(matches),1)
147 nt.assert_equal(len(matches),1)
146 nt.assert_equal(text, k)
148 nt.assert_equal(text, k)
147 nt.assert_equal(matches[0], latex_symbols[k])
149 nt.assert_equal(matches[0], latex_symbols[k])
148 # Test a more complex line
150 # Test a more complex line
149 text, matches = ip.complete(u'print(\\alpha')
151 text, matches = ip.complete(u'print(\\alpha')
150 nt.assert_equal(text, u'\\alpha')
152 nt.assert_equal(text, u'\\alpha')
151 nt.assert_equal(matches[0], latex_symbols['\\alpha'])
153 nt.assert_equal(matches[0], latex_symbols['\\alpha'])
152 # Test multiple matching latex symbols
154 # Test multiple matching latex symbols
153 text, matches = ip.complete(u'\\al')
155 text, matches = ip.complete(u'\\al')
154 nt.assert_in('\\alpha', matches)
156 nt.assert_in('\\alpha', matches)
155 nt.assert_in('\\aleph', matches)
157 nt.assert_in('\\aleph', matches)
156
158
157
159
158
160
159
161
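For reference, latex_symbols is a plain dict mapping LaTeX-style names to unicode characters; a minimal check of the '\alpha' entry used above:

from IPython.core.latex_symbols import latex_symbols

assert latex_symbols['\\alpha'] == 'α'  # "\alpha<tab>" completes to the character itself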
160 def test_back_latex_completion():
162 def test_back_latex_completion():
161 ip = get_ipython()
163 ip = get_ipython()
162
164
163 # do not return more than one match for \beta, only the latex one.
165 # do not return more than one match for \beta, only the latex one.
164 name, matches = ip.complete('\\β')
166 name, matches = ip.complete('\\β')
165 nt.assert_equal(len(matches), 1)
167 nt.assert_equal(len(matches), 1)
166 nt.assert_equal(matches[0], '\\beta')
168 nt.assert_equal(matches[0], '\\beta')
167
169
168 def test_back_unicode_completion():
170 def test_back_unicode_completion():
169 ip = get_ipython()
171 ip = get_ipython()
170
172
171 name, matches = ip.complete('\\Ⅴ')
173 name, matches = ip.complete('\\Ⅴ')
172 nt.assert_equal(len(matches), 1)
174 nt.assert_equal(len(matches), 1)
173 nt.assert_equal(matches[0], '\\ROMAN NUMERAL FIVE')
175 nt.assert_equal(matches[0], '\\ROMAN NUMERAL FIVE')
174
176
175
177
176 def test_forward_unicode_completion():
178 def test_forward_unicode_completion():
177 ip = get_ipython()
179 ip = get_ipython()
178
180
179 name, matches = ip.complete('\\ROMAN NUMERAL FIVE')
181 name, matches = ip.complete('\\ROMAN NUMERAL FIVE')
180 nt.assert_equal(len(matches), 1)
182 nt.assert_equal(len(matches), 1)
181 nt.assert_equal(matches[0], 'Ⅴ')
183 nt.assert_equal(matches[0], 'Ⅴ')
182
184
183 @dec.knownfailureif(sys.platform == 'win32', 'Fails if there is a C:\\j... path')
185 @dec.knownfailureif(sys.platform == 'win32', 'Fails if there is a C:\\j... path')
184 def test_no_ascii_back_completion():
186 def test_no_ascii_back_completion():
185 ip = get_ipython()
187 ip = get_ipython()
186 with TemporaryWorkingDirectory(): # Avoid any filename completions
188 with TemporaryWorkingDirectory(): # Avoid any filename completions
187 # single ascii letters that don't yet have completions
189 # single ascii letters that don't yet have completions
188 for letter in 'jJ' :
190 for letter in 'jJ' :
189 name, matches = ip.complete('\\'+letter)
191 name, matches = ip.complete('\\'+letter)
190 nt.assert_equal(matches, [])
192 nt.assert_equal(matches, [])
191
193
192
194
193
195
194
196
195 class CompletionSplitterTestCase(unittest.TestCase):
197 class CompletionSplitterTestCase(unittest.TestCase):
196 def setUp(self):
198 def setUp(self):
197 self.sp = completer.CompletionSplitter()
199 self.sp = completer.CompletionSplitter()
198
200
199 def test_delim_setting(self):
201 def test_delim_setting(self):
200 self.sp.delims = ' '
202 self.sp.delims = ' '
201 nt.assert_equal(self.sp.delims, ' ')
203 nt.assert_equal(self.sp.delims, ' ')
202 nt.assert_equal(self.sp._delim_expr, '[\ ]')
204 nt.assert_equal(self.sp._delim_expr, '[\ ]')
203
205
204 def test_spaces(self):
206 def test_spaces(self):
205 """Test with only spaces as split chars."""
207 """Test with only spaces as split chars."""
206 self.sp.delims = ' '
208 self.sp.delims = ' '
207 t = [('foo', '', 'foo'),
209 t = [('foo', '', 'foo'),
208 ('run foo', '', 'foo'),
210 ('run foo', '', 'foo'),
209 ('run foo', 'bar', 'foo'),
211 ('run foo', 'bar', 'foo'),
210 ]
212 ]
211 check_line_split(self.sp, t)
213 check_line_split(self.sp, t)
212
214
213
215
214 def test_has_open_quotes1():
216 def test_has_open_quotes1():
215 for s in ["'", "'''", "'hi' '"]:
217 for s in ["'", "'''", "'hi' '"]:
216 nt.assert_equal(completer.has_open_quotes(s), "'")
218 nt.assert_equal(completer.has_open_quotes(s), "'")
217
219
218
220
219 def test_has_open_quotes2():
221 def test_has_open_quotes2():
220 for s in ['"', '"""', '"hi" "']:
222 for s in ['"', '"""', '"hi" "']:
221 nt.assert_equal(completer.has_open_quotes(s), '"')
223 nt.assert_equal(completer.has_open_quotes(s), '"')
222
224
223
225
224 def test_has_open_quotes3():
226 def test_has_open_quotes3():
225 for s in ["''", "''' '''", "'hi' 'ipython'"]:
227 for s in ["''", "''' '''", "'hi' 'ipython'"]:
226 nt.assert_false(completer.has_open_quotes(s))
228 nt.assert_false(completer.has_open_quotes(s))
227
229
228
230
229 def test_has_open_quotes4():
231 def test_has_open_quotes4():
230 for s in ['""', '""" """', '"hi" "ipython"']:
232 for s in ['""', '""" """', '"hi" "ipython"']:
231 nt.assert_false(completer.has_open_quotes(s))
233 nt.assert_false(completer.has_open_quotes(s))
232
234
233
235
234 @knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
236 @knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
235 def test_abspath_file_completions():
237 def test_abspath_file_completions():
236 ip = get_ipython()
238 ip = get_ipython()
237 with TemporaryDirectory() as tmpdir:
239 with TemporaryDirectory() as tmpdir:
238 prefix = os.path.join(tmpdir, 'foo')
240 prefix = os.path.join(tmpdir, 'foo')
239 suffixes = ['1', '2']
241 suffixes = ['1', '2']
240 names = [prefix+s for s in suffixes]
242 names = [prefix+s for s in suffixes]
241 for n in names:
243 for n in names:
242 open(n, 'w').close()
244 open(n, 'w').close()
243
245
244 # Check simple completion
246 # Check simple completion
245 c = ip.complete(prefix)[1]
247 c = ip.complete(prefix)[1]
246 nt.assert_equal(c, names)
248 nt.assert_equal(c, names)
247
249
248 # Now check with a function call
250 # Now check with a function call
249 cmd = 'a = f("%s' % prefix
251 cmd = 'a = f("%s' % prefix
250 c = ip.complete(prefix, cmd)[1]
252 c = ip.complete(prefix, cmd)[1]
251 comp = [prefix+s for s in suffixes]
253 comp = [prefix+s for s in suffixes]
252 nt.assert_equal(c, comp)
254 nt.assert_equal(c, comp)
253
255
254
256
255 def test_local_file_completions():
257 def test_local_file_completions():
256 ip = get_ipython()
258 ip = get_ipython()
257 with TemporaryWorkingDirectory():
259 with TemporaryWorkingDirectory():
258 prefix = './foo'
260 prefix = './foo'
259 suffixes = ['1', '2']
261 suffixes = ['1', '2']
260 names = [prefix+s for s in suffixes]
262 names = [prefix+s for s in suffixes]
261 for n in names:
263 for n in names:
262 open(n, 'w').close()
264 open(n, 'w').close()
263
265
264 # Check simple completion
266 # Check simple completion
265 c = ip.complete(prefix)[1]
267 c = ip.complete(prefix)[1]
266 nt.assert_equal(c, names)
268 nt.assert_equal(c, names)
267
269
268 # Now check with a function call
270 # Now check with a function call
269 cmd = 'a = f("%s' % prefix
271 cmd = 'a = f("%s' % prefix
270 c = ip.complete(prefix, cmd)[1]
272 c = ip.complete(prefix, cmd)[1]
271 comp = set(prefix+s for s in suffixes)
273 comp = set(prefix+s for s in suffixes)
272 nt.assert_true(comp.issubset(set(c)))
274 nt.assert_true(comp.issubset(set(c)))
273
275
274
276
275 def test_jedi():
277 def test_jedi():
276 """
278 """
277 A couple of issues we had with Jedi
279 A couple of issues we had with Jedi
278 """
280 """
279 ip = get_ipython()
281 ip = get_ipython()
280
282
281 def _test_complete(reason, s, comp, start=None, end=None):
283 def _test_complete(reason, s, comp, start=None, end=None):
282 l = len(s)
284 l = len(s)
283 start = start if start is not None else l
285 start = start if start is not None else l
284 end = end if end is not None else l
286 end = end if end is not None else l
285 with provisionalcompleter():
287 with provisionalcompleter():
286 completions = set(ip.Completer.completions(s, l))
288 completions = set(ip.Completer.completions(s, l))
287 assert_in(Completion(start, end, comp), completions, reason)
289 assert_in(Completion(start, end, comp), completions, reason)
288
290
289 def _test_not_complete(reason, s, comp):
291 def _test_not_complete(reason, s, comp):
290 l = len(s)
292 l = len(s)
291 with provisionalcompleter():
293 with provisionalcompleter():
292 completions = set(ip.Completer.completions(s, l))
294 completions = set(ip.Completer.completions(s, l))
293 assert_not_in(Completion(l, l, comp), completions, reason)
295 assert_not_in(Completion(l, l, comp), completions, reason)
294
296
295 import jedi
297 import jedi
296 jedi_version = tuple(int(i) for i in jedi.__version__.split('.')[:3])
298 jedi_version = tuple(int(i) for i in jedi.__version__.split('.')[:3])
297 if jedi_version > (0,10):
299 if jedi_version > (0, 10):
298 yield _test_complete, 'jedi >0.9 should complete and not crash', 'a=1;a.', 'real'
300 yield _test_complete, 'jedi >0.9 should complete and not crash', 'a=1;a.', 'real'
299 yield _test_complete, 'can infer first argument', 'a=(1,"foo");a[0].', 'real'
301 yield _test_complete, 'can infer first argument', 'a=(1,"foo");a[0].', 'real'
300 yield _test_complete, 'can infer second argument', 'a=(1,"foo");a[1].', 'capitalize'
302 yield _test_complete, 'can infer second argument', 'a=(1,"foo");a[1].', 'capitalize'
301 yield _test_complete, 'cover duplicate completions', 'im', 'import', 0, 2
303 yield _test_complete, 'cover duplicate completions', 'im', 'import', 0, 2
302
304
303 yield _test_not_complete, 'does not mix types', 'a=(1,"foo");a[0].', 'capitalize'
305 yield _test_not_complete, 'does not mix types', 'a=(1,"foo");a[0].', 'capitalize'
304
306
307 def test_deduplicate_completions():
308 """
309 Test that completions are correctly deduplicated (even if ranges are not the same)
310 """
311 ip = get_ipython()
312 ip.ex(textwrap.dedent('''
313 class Z:
314 zoo = 1
315 '''))
316 with provisionalcompleter():
317 l = list(_deduplicate_completions('Z.z', ip.Completer.completions('Z.z', 3)))
318
319 assert len(l) == 1, 'Completions (Z.z<tab>) correctly deduplicate: %s ' % l
320 assert l[0].text == 'zoo' # the single surviving completion, whatever its range
321
305
322
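A rough sketch of the behaviour this new test pins down (not the helper's actual implementation): two completions count as duplicates when applying them to the buffer yields the same resulting text, even if their (start, end) ranges differ.

def deduplicate_sketch(text, completions):
    # completions: objects with .start, .end and .text attributes,
    # like the Completion objects used elsewhere in this test file
    seen = set()
    for c in completions:
        applied = text[:c.start] + c.text + text[c.end:]
        if applied not in seen:
            seen.add(applied)
            yield c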
306 def test_greedy_completions():
323 def test_greedy_completions():
307 """
324 """
308 Test the capability of the Greedy completer.
325 Test the capability of the Greedy completer.
309
326
310 Most of the tests here do not really show off the greedy completer; as proof,
327 Most of the tests here do not really show off the greedy completer; as proof,
311 each of the cases below now passes with Jedi. The greedy completer is capable of more.
328 each of the cases below now passes with Jedi. The greedy completer is capable of more.
312
329
313 See the :any:`test_dict_key_completion_contexts`
330 See the :any:`test_dict_key_completion_contexts`
314
331
315 """
332 """
316 ip = get_ipython()
333 ip = get_ipython()
317 ip.ex('a=list(range(5))')
334 ip.ex('a=list(range(5))')
318 _,c = ip.complete('.',line='a[0].')
335 _,c = ip.complete('.',line='a[0].')
319 nt.assert_false('.real' in c,
336 nt.assert_false('.real' in c,
320 "Shouldn't have completed on a[0]: %s"%c)
337 "Shouldn't have completed on a[0]: %s"%c)
321 with greedy_completion(), provisionalcompleter():
338 with greedy_completion(), provisionalcompleter():
322 def _(line, cursor_pos, expect, message, completion):
339 def _(line, cursor_pos, expect, message, completion):
323 _,c = ip.complete('.', line=line, cursor_pos=cursor_pos)
340 _,c = ip.complete('.', line=line, cursor_pos=cursor_pos)
324 with provisionalcompleter():
341 with provisionalcompleter():
325 completions = ip.Completer.completions(line, cursor_pos)
342 completions = ip.Completer.completions(line, cursor_pos)
326 nt.assert_in(expect, c, message%c)
343 nt.assert_in(expect, c, message%c)
327 nt.assert_in(completion, completions)
344 nt.assert_in(completion, completions)
328
345
329 yield _, 'a[0].', 5, 'a[0].real', "Should have completed on a[0].: %s", Completion(5,5, 'real')
346 yield _, 'a[0].', 5, 'a[0].real', "Should have completed on a[0].: %s", Completion(5,5, 'real')
330 yield _, 'a[0].r', 6, 'a[0].real', "Should have completed on a[0].r: %s", Completion(5,6, 'real')
347 yield _, 'a[0].r', 6, 'a[0].real', "Should have completed on a[0].r: %s", Completion(5,6, 'real')
331
348
332 if sys.version_info > (3, 4):
349 if sys.version_info > (3, 4):
333 yield _, 'a[0].from_', 10, 'a[0].from_bytes', "Should have completed on a[0].from_: %s", Completion(5, 10, 'from_bytes')
350 yield _, 'a[0].from_', 10, 'a[0].from_bytes', "Should have completed on a[0].from_: %s", Completion(5, 10, 'from_bytes')
334
351
335
352
336 def test_omit__names():
353 def test_omit__names():
337 # also happens to test IPCompleter as a configurable
354 # also happens to test IPCompleter as a configurable
338 ip = get_ipython()
355 ip = get_ipython()
339 ip._hidden_attr = 1
356 ip._hidden_attr = 1
340 ip._x = {}
357 ip._x = {}
341 c = ip.Completer
358 c = ip.Completer
342 ip.ex('ip=get_ipython()')
359 ip.ex('ip=get_ipython()')
343 cfg = Config()
360 cfg = Config()
344 cfg.IPCompleter.omit__names = 0
361 cfg.IPCompleter.omit__names = 0
345 c.update_config(cfg)
362 c.update_config(cfg)
346 with provisionalcompleter():
363 with provisionalcompleter():
347 s,matches = c.complete('ip.')
364 s,matches = c.complete('ip.')
348 completions = set(c.completions('ip.', 3))
365 completions = set(c.completions('ip.', 3))
349
366
350 nt.assert_in('ip.__str__', matches)
367 nt.assert_in('ip.__str__', matches)
351 nt.assert_in(Completion(3, 3, '__str__'), completions)
368 nt.assert_in(Completion(3, 3, '__str__'), completions)
352
369
353 nt.assert_in('ip._hidden_attr', matches)
370 nt.assert_in('ip._hidden_attr', matches)
354 nt.assert_in(Completion(3,3, "_hidden_attr"), completions)
371 nt.assert_in(Completion(3,3, "_hidden_attr"), completions)
355
372
356
373
357 cfg = Config()
374 cfg = Config()
358 cfg.IPCompleter.omit__names = 1
375 cfg.IPCompleter.omit__names = 1
359 c.update_config(cfg)
376 c.update_config(cfg)
360 with provisionalcompleter():
377 with provisionalcompleter():
361 s,matches = c.complete('ip.')
378 s,matches = c.complete('ip.')
362 completions = set(c.completions('ip.', 3))
379 completions = set(c.completions('ip.', 3))
363
380
364 nt.assert_not_in('ip.__str__', matches)
381 nt.assert_not_in('ip.__str__', matches)
365 nt.assert_not_in(Completion(3,3,'__str__'), completions)
382 nt.assert_not_in(Completion(3,3,'__str__'), completions)
366
383
367 # nt.assert_in('ip._hidden_attr', matches)
384 # nt.assert_in('ip._hidden_attr', matches)
368 nt.assert_in(Completion(3,3, "_hidden_attr"), completions)
385 nt.assert_in(Completion(3,3, "_hidden_attr"), completions)
369
386
370 cfg = Config()
387 cfg = Config()
371 cfg.IPCompleter.omit__names = 2
388 cfg.IPCompleter.omit__names = 2
372 c.update_config(cfg)
389 c.update_config(cfg)
373 with provisionalcompleter():
390 with provisionalcompleter():
374 s,matches = c.complete('ip.')
391 s,matches = c.complete('ip.')
375 completions = set(c.completions('ip.', 3))
392 completions = set(c.completions('ip.', 3))
376
393
377 nt.assert_not_in('ip.__str__', matches)
394 nt.assert_not_in('ip.__str__', matches)
378 nt.assert_not_in(Completion(3,3,'__str__'), completions)
395 nt.assert_not_in(Completion(3,3,'__str__'), completions)
379
396
380 nt.assert_not_in('ip._hidden_attr', matches)
397 nt.assert_not_in('ip._hidden_attr', matches)
381 nt.assert_not_in(Completion(3,3, "_hidden_attr"), completions)
398 nt.assert_not_in(Completion(3,3, "_hidden_attr"), completions)
382
399
383 with provisionalcompleter():
400 with provisionalcompleter():
384 s,matches = c.complete('ip._x.')
401 s,matches = c.complete('ip._x.')
385 completions = set(c.completions('ip._x.', 6))
402 completions = set(c.completions('ip._x.', 6))
386
403
387 nt.assert_in('ip._x.keys', matches)
404 nt.assert_in('ip._x.keys', matches)
388 nt.assert_in(Completion(6,6, "keys"), completions)
405 nt.assert_in(Completion(6,6, "keys"), completions)
389
406
390 del ip._hidden_attr
407 del ip._hidden_attr
391 del ip._x
408 del ip._x
392
409
393
410
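As a configuration note, the three settings exercised above can also be set permanently; the meanings in the comments are inferred from the assertions in this test, and the snippet assumes an ipython_config.py context:

c = get_config()
# 0: complete everything, including __dunder__ and _private names
# 1: hide __dunder__ names unless the leading underscores are typed
# 2: hide every name starting with '_' unless explicitly typed (ip._x.<tab> still works)
c.IPCompleter.omit__names = 2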
394 def test_limit_to__all__False_ok():
411 def test_limit_to__all__False_ok():
395 """
412 """
396 Limit to all is deprecated, once we remove it this test can go away.
413 Limit to all is deprecated, once we remove it this test can go away.
397 """
414 """
398 ip = get_ipython()
415 ip = get_ipython()
399 c = ip.Completer
416 c = ip.Completer
400 ip.ex('class D: x=24')
417 ip.ex('class D: x=24')
401 ip.ex('d=D()')
418 ip.ex('d=D()')
402 cfg = Config()
419 cfg = Config()
403 cfg.IPCompleter.limit_to__all__ = False
420 cfg.IPCompleter.limit_to__all__ = False
404 c.update_config(cfg)
421 c.update_config(cfg)
405 s, matches = c.complete('d.')
422 s, matches = c.complete('d.')
406 nt.assert_in('d.x', matches)
423 nt.assert_in('d.x', matches)
407
424
408
425
409 def test_get__all__entries_ok():
426 def test_get__all__entries_ok():
410 class A(object):
427 class A(object):
411 __all__ = ['x', 1]
428 __all__ = ['x', 1]
412 words = completer.get__all__entries(A())
429 words = completer.get__all__entries(A())
413 nt.assert_equal(words, ['x'])
430 nt.assert_equal(words, ['x'])
414
431
415
432
416 def test_get__all__entries_no__all__ok():
433 def test_get__all__entries_no__all__ok():
417 class A(object):
434 class A(object):
418 pass
435 pass
419 words = completer.get__all__entries(A())
436 words = completer.get__all__entries(A())
420 nt.assert_equal(words, [])
437 nt.assert_equal(words, [])
421
438
422
439
423 def test_func_kw_completions():
440 def test_func_kw_completions():
424 ip = get_ipython()
441 ip = get_ipython()
425 c = ip.Completer
442 c = ip.Completer
426 ip.ex('def myfunc(a=1,b=2): return a+b')
443 ip.ex('def myfunc(a=1,b=2): return a+b')
427 s, matches = c.complete(None, 'myfunc(1,b')
444 s, matches = c.complete(None, 'myfunc(1,b')
428 nt.assert_in('b=', matches)
445 nt.assert_in('b=', matches)
429 # Simulate completing with cursor right after b (pos==10):
446 # Simulate completing with cursor right after b (pos==10):
430 s, matches = c.complete(None, 'myfunc(1,b)', 10)
447 s, matches = c.complete(None, 'myfunc(1,b)', 10)
431 nt.assert_in('b=', matches)
448 nt.assert_in('b=', matches)
432 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
449 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
433 nt.assert_in('b=', matches)
450 nt.assert_in('b=', matches)
434 #builtin function
451 #builtin function
435 s, matches = c.complete(None, 'min(k, k')
452 s, matches = c.complete(None, 'min(k, k')
436 nt.assert_in('key=', matches)
453 nt.assert_in('key=', matches)
437
454
438
455
439 def test_default_arguments_from_docstring():
456 def test_default_arguments_from_docstring():
440 ip = get_ipython()
457 ip = get_ipython()
441 c = ip.Completer
458 c = ip.Completer
442 kwd = c._default_arguments_from_docstring(
459 kwd = c._default_arguments_from_docstring(
443 'min(iterable[, key=func]) -> value')
460 'min(iterable[, key=func]) -> value')
444 nt.assert_equal(kwd, ['key'])
461 nt.assert_equal(kwd, ['key'])
445 #with cython type etc
462 #with cython type etc
446 kwd = c._default_arguments_from_docstring(
463 kwd = c._default_arguments_from_docstring(
447 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
464 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
448 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
465 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
449 #white spaces
466 #white spaces
450 kwd = c._default_arguments_from_docstring(
467 kwd = c._default_arguments_from_docstring(
451 '\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
468 '\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
452 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
469 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
453
470
454 def test_line_magics():
471 def test_line_magics():
455 ip = get_ipython()
472 ip = get_ipython()
456 c = ip.Completer
473 c = ip.Completer
457 s, matches = c.complete(None, 'lsmag')
474 s, matches = c.complete(None, 'lsmag')
458 nt.assert_in('%lsmagic', matches)
475 nt.assert_in('%lsmagic', matches)
459 s, matches = c.complete(None, '%lsmag')
476 s, matches = c.complete(None, '%lsmag')
460 nt.assert_in('%lsmagic', matches)
477 nt.assert_in('%lsmagic', matches)
461
478
462
479
463 def test_cell_magics():
480 def test_cell_magics():
464 from IPython.core.magic import register_cell_magic
481 from IPython.core.magic import register_cell_magic
465
482
466 @register_cell_magic
483 @register_cell_magic
467 def _foo_cellm(line, cell):
484 def _foo_cellm(line, cell):
468 pass
485 pass
469
486
470 ip = get_ipython()
487 ip = get_ipython()
471 c = ip.Completer
488 c = ip.Completer
472
489
473 s, matches = c.complete(None, '_foo_ce')
490 s, matches = c.complete(None, '_foo_ce')
474 nt.assert_in('%%_foo_cellm', matches)
491 nt.assert_in('%%_foo_cellm', matches)
475 s, matches = c.complete(None, '%%_foo_ce')
492 s, matches = c.complete(None, '%%_foo_ce')
476 nt.assert_in('%%_foo_cellm', matches)
493 nt.assert_in('%%_foo_cellm', matches)
477
494
478
495
479 def test_line_cell_magics():
496 def test_line_cell_magics():
480 from IPython.core.magic import register_line_cell_magic
497 from IPython.core.magic import register_line_cell_magic
481
498
482 @register_line_cell_magic
499 @register_line_cell_magic
483 def _bar_cellm(line, cell):
500 def _bar_cellm(line, cell):
484 pass
501 pass
485
502
486 ip = get_ipython()
503 ip = get_ipython()
487 c = ip.Completer
504 c = ip.Completer
488
505
489 # The policy here is trickier, see comments in completion code. The
506 # The policy here is trickier, see comments in completion code. The
490 # returned values depend on whether the user passes %% or not explicitly,
507 # returned values depend on whether the user passes %% or not explicitly,
491 # and this will show a difference if the same name is both a line and cell
508 # and this will show a difference if the same name is both a line and cell
492 # magic.
509 # magic.
493 s, matches = c.complete(None, '_bar_ce')
510 s, matches = c.complete(None, '_bar_ce')
494 nt.assert_in('%_bar_cellm', matches)
511 nt.assert_in('%_bar_cellm', matches)
495 nt.assert_in('%%_bar_cellm', matches)
512 nt.assert_in('%%_bar_cellm', matches)
496 s, matches = c.complete(None, '%_bar_ce')
513 s, matches = c.complete(None, '%_bar_ce')
497 nt.assert_in('%_bar_cellm', matches)
514 nt.assert_in('%_bar_cellm', matches)
498 nt.assert_in('%%_bar_cellm', matches)
515 nt.assert_in('%%_bar_cellm', matches)
499 s, matches = c.complete(None, '%%_bar_ce')
516 s, matches = c.complete(None, '%%_bar_ce')
500 nt.assert_not_in('%_bar_cellm', matches)
517 nt.assert_not_in('%_bar_cellm', matches)
501 nt.assert_in('%%_bar_cellm', matches)
518 nt.assert_in('%%_bar_cellm', matches)
502
519
503
520
504 def test_magic_completion_order():
521 def test_magic_completion_order():
505
522
506 ip = get_ipython()
523 ip = get_ipython()
507 c = ip.Completer
524 c = ip.Completer
508
525
509 # Test ordering of magics and non-magics with the same name
526 # Test ordering of magics and non-magics with the same name
510 # We want the non-magic first
527 # We want the non-magic first
511
528
512 # Before the name 'matplotlib' exists in the user namespace, there should be only one option:
529 # Before the name 'matplotlib' exists in the user namespace, there should be only one option:
513
530
514 text, matches = c.complete('mat')
531 text, matches = c.complete('mat')
515 nt.assert_equal(matches, ["%matplotlib"])
532 nt.assert_equal(matches, ["%matplotlib"])
516
533
517
534
518 ip.run_cell("matplotlib = 1") # introduce name into namespace
535 ip.run_cell("matplotlib = 1") # introduce name into namespace
519
536
520 # After the name is defined, there should be two options, ordered like this:
537 # After the name is defined, there should be two options, ordered like this:
521 text, matches = c.complete('mat')
538 text, matches = c.complete('mat')
522 nt.assert_equal(matches, ["matplotlib", "%matplotlib"])
539 nt.assert_equal(matches, ["matplotlib", "%matplotlib"])
523
540
524
541
525 ip.run_cell("timeit = 1") # define a user variable called 'timeit'
542 ip.run_cell("timeit = 1") # define a user variable called 'timeit'
526
543
527 # Order of user variable and line and cell magics with same name:
544 # Order of user variable and line and cell magics with same name:
528 text, matches = c.complete('timeit')
545 text, matches = c.complete('timeit')
529 nt.assert_equal(matches, ["timeit", "%timeit", "%%timeit"])
546 nt.assert_equal(matches, ["timeit", "%timeit", "%%timeit"])
530
547
531 def test_match_dict_keys():
548 def test_match_dict_keys():
532 """
549 """
533 Test that match_dict_keys works on a couple of use case does return what
533 Test that match_dict_keys works on a couple of use cases, returns what is
550 Test that match_dict_keys works on a couple of use cases, returns what is
534 expected, and does not crash
551 expected, and does not crash
552 """
536 delims = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
553 delims = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
537
554
538
555
539 keys = ['foo', b'far']
556 keys = ['foo', b'far']
540 assert match_dict_keys(keys, "b'", delims=delims) == ("'", 2 ,['far'])
557 assert match_dict_keys(keys, "b'", delims=delims) == ("'", 2 ,['far'])
541 assert match_dict_keys(keys, "b'f", delims=delims) == ("'", 2 ,['far'])
558 assert match_dict_keys(keys, "b'f", delims=delims) == ("'", 2 ,['far'])
542 assert match_dict_keys(keys, 'b"', delims=delims) == ('"', 2 ,['far'])
559 assert match_dict_keys(keys, 'b"', delims=delims) == ('"', 2 ,['far'])
543 assert match_dict_keys(keys, 'b"f', delims=delims) == ('"', 2 ,['far'])
560 assert match_dict_keys(keys, 'b"f', delims=delims) == ('"', 2 ,['far'])
544
561
545 assert match_dict_keys(keys, "'", delims=delims) == ("'", 1 ,['foo'])
562 assert match_dict_keys(keys, "'", delims=delims) == ("'", 1 ,['foo'])
546 assert match_dict_keys(keys, "'f", delims=delims) == ("'", 1 ,['foo'])
563 assert match_dict_keys(keys, "'f", delims=delims) == ("'", 1 ,['foo'])
547 assert match_dict_keys(keys, '"', delims=delims) == ('"', 1 ,['foo'])
564 assert match_dict_keys(keys, '"', delims=delims) == ('"', 1 ,['foo'])
548 assert match_dict_keys(keys, '"f', delims=delims) == ('"', 1 ,['foo'])
565 assert match_dict_keys(keys, '"f', delims=delims) == ('"', 1 ,['foo'])
549
566
550 match_dict_keys
567 match_dict_keys
551
568
552
569
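Reading the assertions above, match_dict_keys appears to return a (quote, token_offset, matched_keys) triple; the snippet below simply restates one of those assertions as a standalone call:

from IPython.core.completer import match_dict_keys

delims = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
keys = ['foo', b'far']
quote, offset, found = match_dict_keys(keys, "b'f", delims=delims)
assert (quote, offset, found) == ("'", 2, ['far'])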
553 def test_dict_key_completion_string():
570 def test_dict_key_completion_string():
554 """Test dictionary key completion for string keys"""
571 """Test dictionary key completion for string keys"""
555 ip = get_ipython()
572 ip = get_ipython()
556 complete = ip.Completer.complete
573 complete = ip.Completer.complete
557
574
558 ip.user_ns['d'] = {'abc': None}
575 ip.user_ns['d'] = {'abc': None}
559
576
560 # check completion at different stages
577 # check completion at different stages
561 _, matches = complete(line_buffer="d[")
578 _, matches = complete(line_buffer="d[")
562 nt.assert_in("'abc'", matches)
579 nt.assert_in("'abc'", matches)
563 nt.assert_not_in("'abc']", matches)
580 nt.assert_not_in("'abc']", matches)
564
581
565 _, matches = complete(line_buffer="d['")
582 _, matches = complete(line_buffer="d['")
566 nt.assert_in("abc", matches)
583 nt.assert_in("abc", matches)
567 nt.assert_not_in("abc']", matches)
584 nt.assert_not_in("abc']", matches)
568
585
569 _, matches = complete(line_buffer="d['a")
586 _, matches = complete(line_buffer="d['a")
570 nt.assert_in("abc", matches)
587 nt.assert_in("abc", matches)
571 nt.assert_not_in("abc']", matches)
588 nt.assert_not_in("abc']", matches)
572
589
573 # check use of different quoting
590 # check use of different quoting
574 _, matches = complete(line_buffer="d[\"")
591 _, matches = complete(line_buffer="d[\"")
575 nt.assert_in("abc", matches)
592 nt.assert_in("abc", matches)
576 nt.assert_not_in('abc\"]', matches)
593 nt.assert_not_in('abc\"]', matches)
577
594
578 _, matches = complete(line_buffer="d[\"a")
595 _, matches = complete(line_buffer="d[\"a")
579 nt.assert_in("abc", matches)
596 nt.assert_in("abc", matches)
580 nt.assert_not_in('abc\"]', matches)
597 nt.assert_not_in('abc\"]', matches)
581
598
582 # check sensitivity to following context
599 # check sensitivity to following context
583 _, matches = complete(line_buffer="d[]", cursor_pos=2)
600 _, matches = complete(line_buffer="d[]", cursor_pos=2)
584 nt.assert_in("'abc'", matches)
601 nt.assert_in("'abc'", matches)
585
602
586 _, matches = complete(line_buffer="d['']", cursor_pos=3)
603 _, matches = complete(line_buffer="d['']", cursor_pos=3)
587 nt.assert_in("abc", matches)
604 nt.assert_in("abc", matches)
588 nt.assert_not_in("abc'", matches)
605 nt.assert_not_in("abc'", matches)
589 nt.assert_not_in("abc']", matches)
606 nt.assert_not_in("abc']", matches)
590
607
591 # check that multiple solutions are correctly returned and that noise is not
608 # check that multiple solutions are correctly returned and that noise is not
592 ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,
609 ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,
593 5: None}
610 5: None}
594
611
595 _, matches = complete(line_buffer="d['a")
612 _, matches = complete(line_buffer="d['a")
596 nt.assert_in("abc", matches)
613 nt.assert_in("abc", matches)
597 nt.assert_in("abd", matches)
614 nt.assert_in("abd", matches)
598 nt.assert_not_in("bad", matches)
615 nt.assert_not_in("bad", matches)
599 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
616 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
600
617
601 # check escaping and whitespace
618 # check escaping and whitespace
602 ip.user_ns['d'] = {'a\nb': None, 'a\'b': None, 'a"b': None, 'a word': None}
619 ip.user_ns['d'] = {'a\nb': None, 'a\'b': None, 'a"b': None, 'a word': None}
603 _, matches = complete(line_buffer="d['a")
620 _, matches = complete(line_buffer="d['a")
604 nt.assert_in("a\\nb", matches)
621 nt.assert_in("a\\nb", matches)
605 nt.assert_in("a\\'b", matches)
622 nt.assert_in("a\\'b", matches)
606 nt.assert_in("a\"b", matches)
623 nt.assert_in("a\"b", matches)
607 nt.assert_in("a word", matches)
624 nt.assert_in("a word", matches)
608 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
625 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
609
626
610 # - can complete on non-initial word of the string
627 # - can complete on non-initial word of the string
611 _, matches = complete(line_buffer="d['a w")
628 _, matches = complete(line_buffer="d['a w")
612 nt.assert_in("word", matches)
629 nt.assert_in("word", matches)
613
630
614 # - understands quote escaping
631 # - understands quote escaping
615 _, matches = complete(line_buffer="d['a\\'")
632 _, matches = complete(line_buffer="d['a\\'")
616 nt.assert_in("b", matches)
633 nt.assert_in("b", matches)
617
634
618 # - default quoting should work like repr
635 # - default quoting should work like repr
619 _, matches = complete(line_buffer="d[")
636 _, matches = complete(line_buffer="d[")
620 nt.assert_in("\"a'b\"", matches)
637 nt.assert_in("\"a'b\"", matches)
621
638
622 # - when opening quote with ", possible to match with unescaped apostrophe
639 # - when opening quote with ", possible to match with unescaped apostrophe
623 _, matches = complete(line_buffer="d[\"a'")
640 _, matches = complete(line_buffer="d[\"a'")
624 nt.assert_in("b", matches)
641 nt.assert_in("b", matches)
625
642
626 # need to not split at delims that readline won't split at
643 # need to not split at delims that readline won't split at
627 if '-' not in ip.Completer.splitter.delims:
644 if '-' not in ip.Completer.splitter.delims:
628 ip.user_ns['d'] = {'before-after': None}
645 ip.user_ns['d'] = {'before-after': None}
629 _, matches = complete(line_buffer="d['before-af")
646 _, matches = complete(line_buffer="d['before-af")
630 nt.assert_in('before-after', matches)
647 nt.assert_in('before-after', matches)
631
648
632 def test_dict_key_completion_contexts():
649 def test_dict_key_completion_contexts():
633 """Test expression contexts in which dict key completion occurs"""
650 """Test expression contexts in which dict key completion occurs"""
634 ip = get_ipython()
651 ip = get_ipython()
635 complete = ip.Completer.complete
652 complete = ip.Completer.complete
636 d = {'abc': None}
653 d = {'abc': None}
637 ip.user_ns['d'] = d
654 ip.user_ns['d'] = d
638
655
639 class C:
656 class C:
640 data = d
657 data = d
641 ip.user_ns['C'] = C
658 ip.user_ns['C'] = C
642 ip.user_ns['get'] = lambda: d
659 ip.user_ns['get'] = lambda: d
643
660
644 def assert_no_completion(**kwargs):
661 def assert_no_completion(**kwargs):
645 _, matches = complete(**kwargs)
662 _, matches = complete(**kwargs)
646 nt.assert_not_in('abc', matches)
663 nt.assert_not_in('abc', matches)
647 nt.assert_not_in('abc\'', matches)
664 nt.assert_not_in('abc\'', matches)
648 nt.assert_not_in('abc\']', matches)
665 nt.assert_not_in('abc\']', matches)
649 nt.assert_not_in('\'abc\'', matches)
666 nt.assert_not_in('\'abc\'', matches)
650 nt.assert_not_in('\'abc\']', matches)
667 nt.assert_not_in('\'abc\']', matches)
651
668
652 def assert_completion(**kwargs):
669 def assert_completion(**kwargs):
653 _, matches = complete(**kwargs)
670 _, matches = complete(**kwargs)
654 nt.assert_in("'abc'", matches)
671 nt.assert_in("'abc'", matches)
655 nt.assert_not_in("'abc']", matches)
672 nt.assert_not_in("'abc']", matches)
656
673
657 # no completion after string closed, even if reopened
674 # no completion after string closed, even if reopened
658 assert_no_completion(line_buffer="d['a'")
675 assert_no_completion(line_buffer="d['a'")
659 assert_no_completion(line_buffer="d[\"a\"")
676 assert_no_completion(line_buffer="d[\"a\"")
660 assert_no_completion(line_buffer="d['a' + ")
677 assert_no_completion(line_buffer="d['a' + ")
661 assert_no_completion(line_buffer="d['a' + '")
678 assert_no_completion(line_buffer="d['a' + '")
662
679
663 # completion in non-trivial expressions
680 # completion in non-trivial expressions
664 assert_completion(line_buffer="+ d[")
681 assert_completion(line_buffer="+ d[")
665 assert_completion(line_buffer="(d[")
682 assert_completion(line_buffer="(d[")
666 assert_completion(line_buffer="C.data[")
683 assert_completion(line_buffer="C.data[")
667
684
668 # greedy flag
685 # greedy flag
669 def assert_completion(**kwargs):
686 def assert_completion(**kwargs):
670 _, matches = complete(**kwargs)
687 _, matches = complete(**kwargs)
671 nt.assert_in("get()['abc']", matches)
688 nt.assert_in("get()['abc']", matches)
672
689
673 assert_no_completion(line_buffer="get()[")
690 assert_no_completion(line_buffer="get()[")
674 with greedy_completion():
691 with greedy_completion():
675 assert_completion(line_buffer="get()[")
692 assert_completion(line_buffer="get()[")
676 assert_completion(line_buffer="get()['")
693 assert_completion(line_buffer="get()['")
677 assert_completion(line_buffer="get()['a")
694 assert_completion(line_buffer="get()['a")
678 assert_completion(line_buffer="get()['ab")
695 assert_completion(line_buffer="get()['ab")
679 assert_completion(line_buffer="get()['abc")
696 assert_completion(line_buffer="get()['abc")
680
697
681
698
682
699
683 def test_dict_key_completion_bytes():
700 def test_dict_key_completion_bytes():
684 """Test handling of bytes in dict key completion"""
701 """Test handling of bytes in dict key completion"""
685 ip = get_ipython()
702 ip = get_ipython()
686 complete = ip.Completer.complete
703 complete = ip.Completer.complete
687
704
688 ip.user_ns['d'] = {'abc': None, b'abd': None}
705 ip.user_ns['d'] = {'abc': None, b'abd': None}
689
706
690 _, matches = complete(line_buffer="d[")
707 _, matches = complete(line_buffer="d[")
691 nt.assert_in("'abc'", matches)
708 nt.assert_in("'abc'", matches)
692 nt.assert_in("b'abd'", matches)
709 nt.assert_in("b'abd'", matches)
693
710
694 if False: # not currently implemented
711 if False: # not currently implemented
695 _, matches = complete(line_buffer="d[b")
712 _, matches = complete(line_buffer="d[b")
696 nt.assert_in("b'abd'", matches)
713 nt.assert_in("b'abd'", matches)
697 nt.assert_not_in("b'abc'", matches)
714 nt.assert_not_in("b'abc'", matches)
698
715
699 _, matches = complete(line_buffer="d[b'")
716 _, matches = complete(line_buffer="d[b'")
700 nt.assert_in("abd", matches)
717 nt.assert_in("abd", matches)
701 nt.assert_not_in("abc", matches)
718 nt.assert_not_in("abc", matches)
702
719
703 _, matches = complete(line_buffer="d[B'")
720 _, matches = complete(line_buffer="d[B'")
704 nt.assert_in("abd", matches)
721 nt.assert_in("abd", matches)
705 nt.assert_not_in("abc", matches)
722 nt.assert_not_in("abc", matches)
706
723
707 _, matches = complete(line_buffer="d['")
724 _, matches = complete(line_buffer="d['")
708 nt.assert_in("abc", matches)
725 nt.assert_in("abc", matches)
709 nt.assert_not_in("abd", matches)
726 nt.assert_not_in("abd", matches)
710
727
711
728
712 def test_dict_key_completion_unicode_py3():
729 def test_dict_key_completion_unicode_py3():
713 """Test handling of unicode in dict key completion"""
730 """Test handling of unicode in dict key completion"""
714 ip = get_ipython()
731 ip = get_ipython()
715 complete = ip.Completer.complete
732 complete = ip.Completer.complete
716
733
717 ip.user_ns['d'] = {u'a\u05d0': None}
734 ip.user_ns['d'] = {u'a\u05d0': None}
718
735
719 # query using escape
736 # query using escape
720 if sys.platform != 'win32':
737 if sys.platform != 'win32':
721 # Known failure on Windows
738 # Known failure on Windows
722 _, matches = complete(line_buffer="d['a\\u05d0")
739 _, matches = complete(line_buffer="d['a\\u05d0")
723 nt.assert_in("u05d0", matches) # tokenized after \\
740 nt.assert_in("u05d0", matches) # tokenized after \\
724
741
725 # query using character
742 # query using character
726 _, matches = complete(line_buffer="d['a\u05d0")
743 _, matches = complete(line_buffer="d['a\u05d0")
727 nt.assert_in(u"a\u05d0", matches)
744 nt.assert_in(u"a\u05d0", matches)
728
745
729 with greedy_completion():
746 with greedy_completion():
730 # query using escape
747 # query using escape
731 _, matches = complete(line_buffer="d['a\\u05d0")
748 _, matches = complete(line_buffer="d['a\\u05d0")
732 nt.assert_in("d['a\\u05d0']", matches) # tokenized after \\
749 nt.assert_in("d['a\\u05d0']", matches) # tokenized after \\
733
750
734 # query using character
751 # query using character
735 _, matches = complete(line_buffer="d['a\u05d0")
752 _, matches = complete(line_buffer="d['a\u05d0")
736 nt.assert_in(u"d['a\u05d0']", matches)
753 nt.assert_in(u"d['a\u05d0']", matches)
737
754
738
755
739
756
740 @dec.skip_without('numpy')
757 @dec.skip_without('numpy')
741 def test_struct_array_key_completion():
758 def test_struct_array_key_completion():
742 """Test dict key completion applies to numpy struct arrays"""
759 """Test dict key completion applies to numpy struct arrays"""
743 import numpy
760 import numpy
744 ip = get_ipython()
761 ip = get_ipython()
745 complete = ip.Completer.complete
762 complete = ip.Completer.complete
746 ip.user_ns['d'] = numpy.array([], dtype=[('hello', 'f'), ('world', 'f')])
763 ip.user_ns['d'] = numpy.array([], dtype=[('hello', 'f'), ('world', 'f')])
747 _, matches = complete(line_buffer="d['")
764 _, matches = complete(line_buffer="d['")
748 nt.assert_in("hello", matches)
765 nt.assert_in("hello", matches)
749 nt.assert_in("world", matches)
766 nt.assert_in("world", matches)
750 # complete on the numpy struct itself
767 # complete on the numpy struct itself
751 dt = numpy.dtype([('my_head', [('my_dt', '>u4'), ('my_df', '>u4')]),
768 dt = numpy.dtype([('my_head', [('my_dt', '>u4'), ('my_df', '>u4')]),
752 ('my_data', '>f4', 5)])
769 ('my_data', '>f4', 5)])
753 x = numpy.zeros(2, dtype=dt)
770 x = numpy.zeros(2, dtype=dt)
754 ip.user_ns['d'] = x[1]
771 ip.user_ns['d'] = x[1]
755 _, matches = complete(line_buffer="d['")
772 _, matches = complete(line_buffer="d['")
756 nt.assert_in("my_head", matches)
773 nt.assert_in("my_head", matches)
757 nt.assert_in("my_data", matches)
774 nt.assert_in("my_data", matches)
758 # complete on a nested level
775 # complete on a nested level
759 with greedy_completion():
776 with greedy_completion():
760 ip.user_ns['d'] = numpy.zeros(2, dtype=dt)
777 ip.user_ns['d'] = numpy.zeros(2, dtype=dt)
761 _, matches = complete(line_buffer="d[1]['my_head']['")
778 _, matches = complete(line_buffer="d[1]['my_head']['")
762 nt.assert_true(any(["my_dt" in m for m in matches]))
779 nt.assert_true(any(["my_dt" in m for m in matches]))
763 nt.assert_true(any(["my_df" in m for m in matches]))
780 nt.assert_true(any(["my_df" in m for m in matches]))
764
781
765
782
766 @dec.skip_without('pandas')
783 @dec.skip_without('pandas')
767 def test_dataframe_key_completion():
784 def test_dataframe_key_completion():
768 """Test dict key completion applies to pandas DataFrames"""
785 """Test dict key completion applies to pandas DataFrames"""
769 import pandas
786 import pandas
770 ip = get_ipython()
787 ip = get_ipython()
771 complete = ip.Completer.complete
788 complete = ip.Completer.complete
772 ip.user_ns['d'] = pandas.DataFrame({'hello': [1], 'world': [2]})
789 ip.user_ns['d'] = pandas.DataFrame({'hello': [1], 'world': [2]})
773 _, matches = complete(line_buffer="d['")
790 _, matches = complete(line_buffer="d['")
774 nt.assert_in("hello", matches)
791 nt.assert_in("hello", matches)
775 nt.assert_in("world", matches)
792 nt.assert_in("world", matches)
776
793
777
794
778 def test_dict_key_completion_invalids():
795 def test_dict_key_completion_invalids():
779 """Smoke test cases dict key completion can't handle"""
796 """Smoke test cases dict key completion can't handle"""
780 ip = get_ipython()
797 ip = get_ipython()
781 complete = ip.Completer.complete
798 complete = ip.Completer.complete
782
799
783 ip.user_ns['no_getitem'] = None
800 ip.user_ns['no_getitem'] = None
784 ip.user_ns['no_keys'] = []
801 ip.user_ns['no_keys'] = []
785 ip.user_ns['cant_call_keys'] = dict
802 ip.user_ns['cant_call_keys'] = dict
786 ip.user_ns['empty'] = {}
803 ip.user_ns['empty'] = {}
787 ip.user_ns['d'] = {'abc': 5}
804 ip.user_ns['d'] = {'abc': 5}
788
805
789 _, matches = complete(line_buffer="no_getitem['")
806 _, matches = complete(line_buffer="no_getitem['")
790 _, matches = complete(line_buffer="no_keys['")
807 _, matches = complete(line_buffer="no_keys['")
791 _, matches = complete(line_buffer="cant_call_keys['")
808 _, matches = complete(line_buffer="cant_call_keys['")
792 _, matches = complete(line_buffer="empty['")
809 _, matches = complete(line_buffer="empty['")
793 _, matches = complete(line_buffer="name_error['")
810 _, matches = complete(line_buffer="name_error['")
794 _, matches = complete(line_buffer="d['\\") # incomplete escape
811 _, matches = complete(line_buffer="d['\\") # incomplete escape
795
812
796 class KeyCompletable(object):
813 class KeyCompletable(object):
797 def __init__(self, things=()):
814 def __init__(self, things=()):
798 self.things = things
815 self.things = things
799
816
800 def _ipython_key_completions_(self):
817 def _ipython_key_completions_(self):
801 return list(self.things)
818 return list(self.things)
802
819
803 def test_object_key_completion():
820 def test_object_key_completion():
804 ip = get_ipython()
821 ip = get_ipython()
805 ip.user_ns['key_completable'] = KeyCompletable(['qwerty', 'qwick'])
822 ip.user_ns['key_completable'] = KeyCompletable(['qwerty', 'qwick'])
806
823
807 _, matches = ip.Completer.complete(line_buffer="key_completable['qw")
824 _, matches = ip.Completer.complete(line_buffer="key_completable['qw")
808 nt.assert_in('qwerty', matches)
825 nt.assert_in('qwerty', matches)
809 nt.assert_in('qwick', matches)
826 nt.assert_in('qwick', matches)
810
827
811
828
812 def test_tryimport():
829 def test_tryimport():
813 """
830 """
814 Test that try_import doesn't crash on a trailing dot, and imports the modules before it
831 Test that try_import doesn't crash on a trailing dot, and imports the modules before it
815 """
832 """
816 from IPython.core.completerlib import try_import
833 from IPython.core.completerlib import try_import
817 assert(try_import("IPython."))
834 assert(try_import("IPython."))
818
835
819
836
820 def test_aimport_module_completer():
837 def test_aimport_module_completer():
821 ip = get_ipython()
838 ip = get_ipython()
822 _, matches = ip.complete('i', '%aimport i')
839 _, matches = ip.complete('i', '%aimport i')
823 nt.assert_in('io', matches)
840 nt.assert_in('io', matches)
824 nt.assert_not_in('int', matches)
841 nt.assert_not_in('int', matches)
825
842
826 def test_nested_import_module_completer():
843 def test_nested_import_module_completer():
827 ip = get_ipython()
844 ip = get_ipython()
828 _, matches = ip.complete(None, 'import IPython.co', 17)
845 _, matches = ip.complete(None, 'import IPython.co', 17)
829 nt.assert_in('IPython.core', matches)
846 nt.assert_in('IPython.core', matches)
830 nt.assert_not_in('import IPython.core', matches)
847 nt.assert_not_in('import IPython.core', matches)
831 nt.assert_not_in('IPython.display', matches)
848 nt.assert_not_in('IPython.display', matches)
832
849
833 def test_import_module_completer():
850 def test_import_module_completer():
834 ip = get_ipython()
851 ip = get_ipython()
835 _, matches = ip.complete('i', 'import i')
852 _, matches = ip.complete('i', 'import i')
836 nt.assert_in('io', matches)
853 nt.assert_in('io', matches)
837 nt.assert_not_in('int', matches)
854 nt.assert_not_in('int', matches)
838
855
839 def test_from_module_completer():
856 def test_from_module_completer():
840 ip = get_ipython()
857 ip = get_ipython()
841 _, matches = ip.complete('B', 'from io import B', 16)
858 _, matches = ip.complete('B', 'from io import B', 16)
842 nt.assert_in('BytesIO', matches)
859 nt.assert_in('BytesIO', matches)
843 nt.assert_not_in('BaseException', matches)
860 nt.assert_not_in('BaseException', matches)
@@ -1,126 +1,128 b''
1 """prompt-toolkit utilities
1 """prompt-toolkit utilities
2
2
3 Everything in this module is a private API,
3 Everything in this module is a private API,
4 not to be used outside IPython.
4 not to be used outside IPython.
5 """
5 """
6
6
7 # Copyright (c) IPython Development Team.
7 # Copyright (c) IPython Development Team.
8 # Distributed under the terms of the Modified BSD License.
8 # Distributed under the terms of the Modified BSD License.
9
9
10 import unicodedata
10 import unicodedata
11 from wcwidth import wcwidth
11 from wcwidth import wcwidth
12
12
13 from IPython.core.completer import IPCompleter, provisionalcompleter, rectify_completions, cursor_to_position
13 from IPython.core.completer import (
14 IPCompleter, provisionalcompleter, rectify_completions, cursor_to_position,
15 _deduplicate_completions)
from prompt_toolkit.completion import Completer, Completion
from prompt_toolkit.layout.lexers import Lexer
from prompt_toolkit.layout.lexers import PygmentsLexer

import pygments.lexers as pygments_lexers

_completion_sentinel = object()


class IPythonPTCompleter(Completer):
    """Adaptor to provide IPython completions to prompt_toolkit"""
    def __init__(self, ipy_completer=None, shell=None, patch_stdout=None):
        if shell is None and ipy_completer is None:
            raise TypeError("Please pass shell=an InteractiveShell instance.")
        self._ipy_completer = ipy_completer
        self.shell = shell
        if patch_stdout is None:
            raise TypeError("Please pass patch_stdout")
        self.patch_stdout = patch_stdout

    @property
    def ipy_completer(self):
        if self._ipy_completer:
            return self._ipy_completer
        else:
            return self.shell.Completer

    def get_completions(self, document, complete_event):
        if not document.current_line.strip():
            return
        # Some bits of our completion system may print stuff (e.g. if a module
        # is imported). This context manager ensures that doesn't interfere with
        # the prompt.

        with self.patch_stdout(), provisionalcompleter():
            body = document.text
            cursor_row = document.cursor_position_row
            cursor_col = document.cursor_position_col
            cursor_position = document.cursor_position
            offset = cursor_to_position(body, cursor_row, cursor_col)
            yield from self._get_completions(body, offset, cursor_position, self.ipy_completer)

    @staticmethod
    def _get_completions(body, offset, cursor_position, ipyc):
60 """
62 """
61 Private equivalent of get_completions() use only for unit_testing.
63 Private equivalent of get_completions() use only for unit_testing.
62 """
64 """
        debug = getattr(ipyc, 'debug', False)
-        completions = rectify_completions(
-            body, ipyc.completions(body, offset), _debug=debug)
+        completions = _deduplicate_completions(
+            body, ipyc.completions(body, offset))
        for c in completions:
            if not c.text:
                # Guard against completion machinery giving us an empty string.
                continue
            text = unicodedata.normalize('NFC', c.text)
            # When the first character of the completion has a zero length,
            # then it's probably a decomposed unicode character. E.g. caused by
            # the "\dot" completion. Try to compose again with the previous
            # character.
            if wcwidth(text[0]) == 0:
                if cursor_position + c.start > 0:
                    char_before = body[c.start - 1]
                    fixed_text = unicodedata.normalize(
                        'NFC', char_before + text)

                    # Yield the modified completion instead, if this worked.
                    if wcwidth(text[0:1]) == 1:
                        yield Completion(fixed_text, start_position=c.start - offset - 1)
                        continue

            # TODO: Use Jedi to determine meta_text
            # (Jedi currently has a bug that results in incorrect information.)
            # meta_text = ''
            # yield Completion(m, start_position=start_pos,
            #     display_meta=meta_text)
            yield Completion(c.text, start_position=c.start - offset, display_meta=c.type)

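# Aside, not part of ptutils.py: the zero-width handling above can be checked
# with plain unicodedata/wcwidth. A combining mark such as U+0301 (combining
# acute accent) has display width 0 on its own, and NFC normalization folds it
# into the preceding character:
#     wcwidth('\u0301')                        -> 0
#     unicodedata.normalize('NFC', 'e\u0301')  -> 'é'
# That is why the completion is re-attached to the character just before the
# cursor and yielded with start_position shifted one more column to the left.
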
class IPythonPTLexer(Lexer):
    """
    Wrapper around PythonLexer and BashLexer.
    """
    def __init__(self):
        l = pygments_lexers
        self.python_lexer = PygmentsLexer(l.Python3Lexer)
        self.shell_lexer = PygmentsLexer(l.BashLexer)

        self.magic_lexers = {
            'HTML': PygmentsLexer(l.HtmlLexer),
            'html': PygmentsLexer(l.HtmlLexer),
            'javascript': PygmentsLexer(l.JavascriptLexer),
            'js': PygmentsLexer(l.JavascriptLexer),
            'perl': PygmentsLexer(l.PerlLexer),
            'ruby': PygmentsLexer(l.RubyLexer),
            'latex': PygmentsLexer(l.TexLexer),
        }

    def lex_document(self, cli, document):
        text = document.text.lstrip()

        lexer = self.python_lexer

        if text.startswith('!') or text.startswith('%%bash'):
            lexer = self.shell_lexer

        elif text.startswith('%%'):
            for magic, l in self.magic_lexers.items():
                if text.startswith('%%' + magic):
                    lexer = l
                    break

        return lexer.lex_document(cli, document)
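The only behavioural change in this file is the swap from ``rectify_completions(..., _debug=debug)`` to ``_deduplicate_completions(body, ...)`` inside ``_get_completions`` (note that ``debug`` is still computed but no longer forwarded, since the new call takes no ``_debug`` argument). The helper itself lives in ``IPython.core.completer`` and is not shown in this hunk; judging only from its name and call site, it drops completions that would insert the same text over the same range. A minimal sketch under that assumption, using the ``start`` and ``text`` attributes the surrounding code already uses plus an ``end`` attribute for the other half of the replaced span; everything else is illustrative:

def _deduplicate_completions(text, completions):
    # Sketch only: the real helper is defined in IPython.core.completer and
    # may differ in details. ``text`` is accepted for signature parity with
    # the call site; this simplified version does not need it.
    seen = set()
    for c in completions:
        key = (c.start, c.end, c.text)
        if key in seen:
            # Same replacement span and same inserted text: a duplicate.
            continue
        seen.add(key)
        yield c

Such a generator can be dropped in where the diff calls it: ``_deduplicate_completions(body, ipyc.completions(body, offset))`` then yields at most one entry per distinct replacement.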