Escape the quote in a filename even if text starts with a quote
Christopher C. Aycock -
@@ -0,0 +1,2 @@
- Quotes in a filename are always escaped during tab-completion on non-Windows.
  :ghpull:`10069`
@@ -1,1879 +1,1880 @@
# encoding: utf-8
"""Completion for IPython.

This module started as a fork of the rlcompleter module in the Python standard
library. The original enhancements made to rlcompleter have been sent
upstream and were accepted as of Python 2.3.

This module now supports a wide variety of completion mechanisms, both for
normal classic Python code and for IPython-specific syntax like magics.

Latex and Unicode completion
============================

IPython and compatible frontends not only can complete your code, but can help
you to input a wide range of characters. In particular we allow you to insert
a unicode character using the tab completion mechanism.

Forward latex/unicode completion
--------------------------------

Forward completion allows you to easily type a unicode character using its latex
name, or unicode long description. To do so type a backslash followed by the
relevant name and press tab:


Using latex completion:

.. code::

    \\alpha<tab>
    α

or using unicode completion:


.. code::

    \\greek small letter alpha<tab>
    α


Only valid Python identifiers will complete. Combining characters (like arrows or
dots) are also available; unlike latex, they need to be put after their
counterpart, that is to say, `F\\\\vec<tab>` is correct, not `\\\\vec<tab>F`.

Some browsers are known to display combining characters incorrectly.

Backward latex completion
-------------------------

It is sometimes challenging to know how to type a character. If you are using
IPython, or any compatible frontend, you can prepend a backslash to the character
and press `<tab>` to expand it to its latex form.

.. code::

    \\α<tab>
    \\alpha


Both forward and backward completions can be deactivated by setting the
``Completer.backslash_combining_completions`` option to ``False``.


Experimental
============

Starting with IPython 6.0, this module can make use of the Jedi library to
generate completions both using static analysis of the code, and dynamically
inspecting multiple namespaces. The APIs attached to this new mechanism are
unstable and will raise unless used in an :any:`provisionalcompleter` context
manager.

You will find that the following are experimental:

- :any:`provisionalcompleter`
- :any:`IPCompleter.completions`
- :any:`Completion`
- :any:`rectify_completions`

.. note::

    better name for :any:`rectify_completions` ?

We welcome any feedback on these new APIs, and we also encourage you to try this
module in debug mode (start IPython with ``--Completer.debug=True``) in order
to have extra logging information if :any:`jedi` is crashing, or if the current
IPython completer pending deprecations are returning results not yet handled
by :any:`jedi`.

Using Jedi for tab completion allows snippets like the following to work without
having to execute any code:

>>> myvar = ['hello', 42]
... myvar[1].bi<tab>

Tab completion will be able to infer that ``myvar[1]`` is a real number without
executing any code, unlike the previously available ``IPCompleter.greedy``
option.

Be sure to update :any:`jedi` to the latest stable version or to try the
current development version to get better completions.
"""

# skip module doctests
skip_doctest = True

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
#
# Some of this code originated from rlcompleter in the Python standard library
# Copyright (C) 2001 Python Software Foundation, www.python.org


import __main__
import builtins as builtin_mod
import glob
import time
import inspect
import itertools
import keyword
import os
import re
import sys
import unicodedata
import string
import warnings

from contextlib import contextmanager
from importlib import import_module
from typing import Iterator, List
from types import SimpleNamespace

from traitlets.config.configurable import Configurable
from IPython.core.error import TryNext
from IPython.core.inputsplitter import ESC_MAGIC
from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
from IPython.utils import generics
from IPython.utils.dir2 import dir2, get_real_method
from IPython.utils.process import arg_split
from IPython.utils.py3compat import cast_unicode_py2
from traitlets import Bool, Enum, observe, Int

try:
    import jedi
    import jedi.api.helpers
    JEDI_INSTALLED = True
except ImportError:
    JEDI_INSTALLED = False
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------

# Public API
__all__ = ['Completer','IPCompleter']

if sys.platform == 'win32':
    PROTECTABLES = ' '
else:
    PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'


_deprecation_readline_sentinel = object()


class ProvisionalCompleterWarning(FutureWarning):
    """
    Exception raised by an experimental feature in this module.

    Wrap code in the :any:`provisionalcompleter` context manager if you
    are certain you want to use an unstable feature.
    """
    pass

warnings.filterwarnings('error', category=ProvisionalCompleterWarning)

@contextmanager
def provisionalcompleter(action='ignore'):
    """


    This context manager has to be used in any place where unstable completer
    behavior and API may be called.

    >>> with provisionalcompleter():
    ...     completer.do_experimental_things() # works

    >>> completer.do_experimental_things() # raises.

    .. note:: Unstable

        By using this context manager you agree that the API in use may change
        without warning, and that you won't complain if it does so.

        You also understand that if the API is not to your liking you should
        report a bug upstream to explain your use case and improve the API, and
        that you will lose credibility if you complain after the API is made
        stable.

        We'll be happy to get your feedback, feature requests and improvements on
        any of the unstable APIs!
    """
    with warnings.catch_warnings():
        warnings.filterwarnings(action, category=ProvisionalCompleterWarning)
        yield


def has_open_quotes(s):
    """Return whether a string has open quotes.

    This simply counts whether the number of quote characters of either type in
    the string is odd.

    Returns
    -------
    If there is an open quote, the quote character is returned.  Else, return
    False.
    """
    # We check " first, then ', so complex cases with nested quotes will get
    # the " to take precedence.
    if s.count('"') % 2:
        return '"'
    elif s.count("'") % 2:
        return "'"
    else:
        return False
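
# A minimal illustration of has_open_quotes() (added for clarity; not part of
# the original module). Double quotes take precedence when both counts are odd:
#
#   >>> has_open_quotes('print("hello')
#   '"'
#   >>> has_open_quotes("it's a file")
#   "'"
#   >>> has_open_quotes('"done"')
#   False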


-def protect_filename(s):
+def protect_filename(s, protectables=PROTECTABLES):
    """Escape a string to protect certain characters."""
-    if set(s) & set(PROTECTABLES):
+    if set(s) & set(protectables):
        if sys.platform == "win32":
            return '"' + s + '"'
        else:
-            return "".join(("\\" + c if c in PROTECTABLES else c) for c in s)
+            return "".join(("\\" + c if c in protectables else c) for c in s)
    else:
        return s
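
# A minimal illustration (added for clarity; not part of the original module) of
# how protect_filename() escapes shell-special characters. The new
# ``protectables`` argument presumably lets the caller pass a different set,
# e.g. when the text being completed already starts with a quote (the subject
# of :ghpull:`10069`):
#
#   >>> protect_filename("my file (1).txt")        # non-Windows behaviour
#   'my\\ file\\ \\(1\\).txt'
#   >>> protect_filename("my file.txt", ' ')       # only escape spaces
#   'my\\ file.txt'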


def expand_user(path):
    """Expand ``~``-style usernames in strings.

    This is similar to :func:`os.path.expanduser`, but it computes and returns
    extra information that will be useful if the input was being used in
    computing completions, and you wish to return the completions with the
    original '~' instead of its expanded value.

    Parameters
    ----------
    path : str
      String to be expanded.  If no ~ is present, the output is the same as the
      input.

    Returns
    -------
    newpath : str
      Result of ~ expansion in the input path.
    tilde_expand : bool
      Whether any expansion was performed or not.
    tilde_val : str
      The value that ~ was replaced with.
    """
    # Default values
    tilde_expand = False
    tilde_val = ''
    newpath = path

    if path.startswith('~'):
        tilde_expand = True
        rest = len(path)-1
        newpath = os.path.expanduser(path)
        if rest:
            tilde_val = newpath[:-rest]
        else:
            tilde_val = newpath

    return newpath, tilde_expand, tilde_val
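
# A minimal illustration (added for clarity; not part of the original module),
# assuming the current user's home directory is /home/user:
#
#   >>> expand_user('~/notebooks')
#   ('/home/user/notebooks', True, '/home/user')
#   >>> expand_user('data.csv')
#   ('data.csv', False, '')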


def compress_user(path, tilde_expand, tilde_val):
    """Does the opposite of expand_user, with its outputs.
    """
    if tilde_expand:
        return path.replace(tilde_val, '~')
    else:
        return path


def completions_sorting_key(word):
    """key for sorting completions

    This does several things:

    - Lowercase all completions, so they are sorted alphabetically with
      upper and lower case words mingled
    - Demote any completions starting with underscores to the end
    - Insert any %magic and %%cellmagic completions in the alphabetical order
      by their name
    """
    # Case insensitive sort
    word = word.lower()

    prio1, prio2 = 0, 0

    if word.startswith('__'):
        prio1 = 2
    elif word.startswith('_'):
        prio1 = 1

    if word.endswith('='):
        prio1 = -1

    if word.startswith('%%'):
        # If there's another % in there, this is something else, so leave it alone
        if not "%" in word[2:]:
            word = word[2:]
            prio2 = 2
    elif word.startswith('%'):
        if not "%" in word[1:]:
            word = word[1:]
            prio2 = 1

    return prio1, word, prio2
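
# A minimal illustration (added for clarity; not part of the original module):
# magics sort by their bare name, and underscore-prefixed names sink to the end:
#
#   >>> sorted(['_private', 'Zeta', 'alpha', '%%time', '__dunder'],
#   ...        key=completions_sorting_key)
#   ['alpha', '%%time', 'Zeta', '_private', '__dunder']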


class _FakeJediCompletion:
    """
    This is a workaround to communicate to the UI that Jedi has crashed and to
    report a bug. Will be used only if :any:`IPCompleter.debug` is set to true.

    Added in IPython 6.0 so should likely be removed for 7.0

    """

    def __init__(self, name):

        self.name = name
        self.complete = name
        self.type = 'crashed'
        self.name_with_symbols = name

    def __repr__(self):
        return '<Fake completion object jedi has crashed>'


class Completion:
    """
    Completion object used and returned by IPython completers.

    .. warning:: Unstable

        This function is unstable, API may change without warning.
        It will also raise unless used in the proper context manager.

    This acts as a middle-ground :any:`Completion` object between the
    :any:`jedi.api.classes.Completion` object and the Prompt Toolkit completion
    object. While Jedi needs a lot of information about the evaluator and how the
    code should be run/inspected, Prompt Toolkit (and other frontends) mostly
    need user-facing information:

    - Which range should be replaced by what.
    - Some metadata (like the completion type), or meta information to be
      displayed to the user.

    For debugging purposes we can also store the origin of the completion (``jedi``,
    ``IPython.python_matches``, ``IPython.magics_matches``...).
    """

    def __init__(self, start: int, end: int, text: str, *, type: str=None, _origin=''):
        warnings.warn("``Completion`` is a provisional API (as of IPython 6.0). "
                      "It may change without warnings. "
                      "Use in corresponding context manager.",
                      category=ProvisionalCompleterWarning, stacklevel=2)

        self.start = start
        self.end = end
        self.text = text
        self.type = type
        self._origin = _origin

    def __repr__(self):
        return '<Completion start=%s end=%s text=%r type=%r>' % (self.start, self.end, self.text, self.type or '?')

    def __eq__(self, other) -> bool:
        """
        Equality and hash do not hash the type (as some completers may not be
        able to infer the type), but are used to (partially) de-duplicate
        completions.

        Completely de-duplicating completions is a bit trickier than just
        comparing, as it depends on the surrounding text, which Completions are
        not aware of.
        """
        return self.start == other.start and \
            self.end == other.end and \
            self.text == other.text

    def __hash__(self):
        return hash((self.start, self.end, self.text))


_IC = Iterator[Completion]


def _deduplicate_completions(text: str, completions: _IC)-> _IC:
    """
    Deduplicate a set of completions.

    .. warning:: Unstable

        This function is unstable, API may change without warning.

    Parameters
    ----------
    text: str
        text that should be completed.
    completions: Iterator[Completion]
        iterator over the completions to deduplicate


    Completions coming from multiple sources may be different but end up having
    the same effect when applied to ``text``. If this is the case, this will
    consider completions as equal and only emit the first encountered.

    Not folded in `completions()` yet for debugging purposes, and to detect when
    the IPython completer does return things that Jedi does not, but should be
    at some point.
    """
    completions = list(completions)
    if not completions:
        return

    new_start = min(c.start for c in completions)
    new_end = max(c.end for c in completions)

    seen = set()
    for c in completions:
        new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
        if new_text not in seen:
            yield c
            seen.add(new_text)


def rectify_completions(text: str, completions: _IC, *, _debug=False)->_IC:
    """
    Rectify a set of completions to all have the same ``start`` and ``end``

    .. warning:: Unstable

        This function is unstable, API may change without warning.
        It will also raise unless used in the proper context manager.

    Parameters
    ----------
    text: str
        text that should be completed.
    completions: Iterator[Completion]
        iterator over the completions to rectify


    :any:`jedi.api.classes.Completion` s returned by Jedi may not have the same start and end, though
    the Jupyter Protocol requires them to behave like so. This will readjust
    the completions to have the same ``start`` and ``end`` by padding both
    extremities with surrounding text.

    During stabilisation this should support a ``_debug`` option to log which
    completions are returned by the IPython completer and not found in Jedi, in
    order to make upstream bug reports.
    """
    warnings.warn("`rectify_completions` is a provisional API (as of IPython 6.0). "
                  "It may change without warnings. "
                  "Use in corresponding context manager.",
                  category=ProvisionalCompleterWarning, stacklevel=2)

    completions = list(completions)
    if not completions:
        return
    starts = (c.start for c in completions)
    ends = (c.end for c in completions)

    new_start = min(starts)
    new_end = max(ends)

    seen_jedi = set()
    seen_python_matches = set()
    for c in completions:
        new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
        if c._origin == 'jedi':
            seen_jedi.add(new_text)
        elif c._origin == 'IPCompleter.python_matches':
            seen_python_matches.add(new_text)
        yield Completion(new_start, new_end, new_text, type=c.type, _origin=c._origin)
    diff = seen_python_matches.difference(seen_jedi)
    if diff and _debug:
        print('IPython.python matches have extras:', diff)


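# A minimal illustration (added for clarity; not part of the original module) of
# the padding that rectify_completions() performs; the two completions are
# hypothetical and only their ranges/texts matter:
#
#   >>> with provisionalcompleter():
#   ...     cs = [Completion(4, 5, 'bar'), Completion(0, 5, 'foo.baz')]
#   ...     print([c.text for c in rectify_completions('foo.b', cs)])
#   ['foo.bar', 'foo.baz']
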
if sys.platform == 'win32':
    DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
else:
    DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'

GREEDY_DELIMS = ' =\r\n'


class CompletionSplitter(object):
    """An object to split an input line in a manner similar to readline.

    By having our own implementation, we can expose readline-like completion in
    a uniform manner to all frontends.  This object only needs to be given the
    line of text to be split and the cursor position on said line, and it
    returns the 'word' to be completed on at the cursor after splitting the
    entire line.

    What characters are used as splitting delimiters can be controlled by
    setting the ``delims`` attribute (this is a property that internally
    automatically builds the necessary regular expression)"""

    # Private interface

    # A string of delimiter characters.  The default value makes sense for
    # IPython's most typical usage patterns.
    _delims = DELIMS

    # The expression (a normal string) to be compiled into a regular expression
    # for actual splitting.  We store it as an attribute mostly for ease of
    # debugging, since this type of code can be so tricky to debug.
    _delim_expr = None

    # The regular expression that does the actual splitting
    _delim_re = None

    def __init__(self, delims=None):
        delims = CompletionSplitter._delims if delims is None else delims
        self.delims = delims

    @property
    def delims(self):
        """Return the string of delimiter characters."""
        return self._delims

    @delims.setter
    def delims(self, delims):
        """Set the delimiters for line splitting."""
        expr = '[' + ''.join('\\'+ c for c in delims) + ']'
        self._delim_re = re.compile(expr)
        self._delims = delims
        self._delim_expr = expr

    def split_line(self, line, cursor_pos=None):
        """Split a line of text with a cursor at the given position.
        """
        l = line if cursor_pos is None else line[:cursor_pos]
        return self._delim_re.split(l)[-1]
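
    # A minimal illustration (added for clarity; not part of the original
    # module): only the token under the cursor, after splitting on the
    # delimiters, is returned.
    #
    #   >>> CompletionSplitter().split_line('print(foo.ba')
    #   'foo.ba'
    #   >>> CompletionSplitter().split_line('a + b.c', cursor_pos=5)
    #   'b'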



class Completer(Configurable):

    greedy = Bool(False,
        help="""Activate greedy completion
        PENDING DEPRECATION. This is now mostly taken care of with Jedi.

        This will enable completion on elements of lists, results of function calls, etc.,
        but can be unsafe because the code is actually evaluated on TAB.
        """
    ).tag(config=True)

    use_jedi = Bool(default_value=JEDI_INSTALLED,
                    help="Experimental: Use Jedi to generate autocompletions. "
                         "Defaults to True if jedi is installed").tag(config=True)

    jedi_compute_type_timeout = Int(default_value=400,
        help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
        Set to 0 to stop computing types. Non-zero values lower than 100ms may hurt
        performance by preventing jedi from building its cache.
        """).tag(config=True)

    debug = Bool(default_value=False,
                 help='Enable debug for the Completer. Mostly print extra '
                      'information for experimental jedi integration.')\
        .tag(config=True)

    backslash_combining_completions = Bool(True,
        help="Enable unicode completions, e.g. \\alpha<tab> . "
             "Includes completion of latex commands, unicode names, and expanding "
             "unicode characters back to latex commands.").tag(config=True)



    def __init__(self, namespace=None, global_namespace=None, **kwargs):
        """Create a new completer for the command line.

        Completer(namespace=ns, global_namespace=ns2) -> completer instance.

        If unspecified, the default namespace where completions are performed
        is __main__ (technically, __main__.__dict__). Namespaces should be
        given as dictionaries.

        An optional second namespace can be given.  This allows the completer
        to handle cases where both the local and global scopes need to be
        distinguished.
        """

        # Don't bind to namespace quite yet, but flag whether the user wants a
        # specific namespace or to use __main__.__dict__. This will allow us
        # to bind to __main__.__dict__ at completion time, not now.
        if namespace is None:
            self.use_main_ns = True
        else:
            self.use_main_ns = False
            self.namespace = namespace

        # The global namespace, if given, can be bound directly
        if global_namespace is None:
            self.global_namespace = {}
        else:
            self.global_namespace = global_namespace

        super(Completer, self).__init__(**kwargs)

    def complete(self, text, state):
        """Return the next possible completion for 'text'.

        This is called successively with state == 0, 1, 2, ... until it
        returns None.  The completion should begin with 'text'.

        """
        if self.use_main_ns:
            self.namespace = __main__.__dict__

        if state == 0:
            if "." in text:
                self.matches = self.attr_matches(text)
            else:
                self.matches = self.global_matches(text)
        try:
            return self.matches[state]
        except IndexError:
            return None

    def global_matches(self, text):
        """Compute matches when text is a simple name.

        Return a list of all keywords, built-in functions and names currently
        defined in self.namespace or self.global_namespace that match.

        """
        matches = []
        match_append = matches.append
        n = len(text)
        for lst in [keyword.kwlist,
                    builtin_mod.__dict__.keys(),
                    self.namespace.keys(),
                    self.global_namespace.keys()]:
            for word in lst:
                if word[:n] == text and word != "__builtins__":
                    match_append(word)
        return [cast_unicode_py2(m) for m in matches]

    def attr_matches(self, text):
        """Compute matches when text contains a dot.

        Assuming the text is of the form NAME.NAME....[NAME], and is
        evaluatable in self.namespace or self.global_namespace, it will be
        evaluated and its attributes (as revealed by dir()) are used as
        possible completions.  (For class instances, class members are
        also considered.)

        WARNING: this can still invoke arbitrary C code, if an object
        with a __getattr__ hook is evaluated.

        """

        # Another option, seems to work great. Catches things like ''.<tab>
        m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)

        if m:
            expr, attr = m.group(1, 3)
        elif self.greedy:
            m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
            if not m2:
                return []
            expr, attr = m2.group(1,2)
        else:
            return []

        try:
            obj = eval(expr, self.namespace)
        except:
            try:
                obj = eval(expr, self.global_namespace)
            except:
                return []

        if self.limit_to__all__ and hasattr(obj, '__all__'):
            words = get__all__entries(obj)
        else:
            words = dir2(obj)

        try:
            words = generics.complete_object(obj, words)
        except TryNext:
            pass
        except AssertionError:
            raise
        except Exception:
            # Silence errors from completion function
            #raise # dbg
            pass
        # Build match list to return
        n = len(attr)
        return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr ]
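
    # A minimal illustration (added for clarity; not part of the original
    # module) of the readline-style state protocol implemented by complete():
    #
    #   >>> c = Completer(namespace={'spam': 1, 'span': 2})
    #   >>> c.complete('sp', 0), c.complete('sp', 1)
    #   ('spam', 'span')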


def get__all__entries(obj):
    """returns the strings in the __all__ attribute"""
    try:
        words = getattr(obj, '__all__')
    except:
        return []

    return [cast_unicode_py2(w) for w in words if isinstance(w, str)]


def match_dict_keys(keys: List[str], prefix: str, delims: str):
    """Used by dict_key_matches, matching the prefix to a list of keys

    Parameters
    ==========
    keys:
        list of keys in dictionary currently being completed.
    prefix:
        Part of the text already typed by the user, e.g. `mydict[b'fo`
    delims:
        String of delimiters to consider when finding the current key.

    Returns
    =======

    A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
    ``quote`` being the quote that needs to be used to close the current string,
    ``token_start`` the position where the replacement should start occurring,
    and ``matched`` a list of replacements/completions.

    """
    if not prefix:
        return None, 0, [repr(k) for k in keys
                         if isinstance(k, (str, bytes))]
    quote_match = re.search('["\']', prefix)
    quote = quote_match.group()
    try:
        prefix_str = eval(prefix + quote, {})
    except Exception:
        return None, 0, []

    pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
    token_match = re.search(pattern, prefix, re.UNICODE)
    token_start = token_match.start()
    token_prefix = token_match.group()

    matched = []
    for key in keys:
        try:
            if not key.startswith(prefix_str):
                continue
        except (AttributeError, TypeError, UnicodeError):
            # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
            continue

        # reformat remainder of key to begin with prefix
        rem = key[len(prefix_str):]
        # force repr wrapped in '
        rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
        if rem_repr.startswith('u') and prefix[0] not in 'uU':
            # Found key is unicode, but prefix is Py2 string.
            # Therefore attempt to interpret key as string.
            try:
                rem_repr = repr(rem.encode('ascii') + '"')
            except UnicodeEncodeError:
                continue

        rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
        if quote == '"':
            # The entered prefix is quoted with ",
            # but the match is quoted with '.
            # A contained " hence needs escaping for comparison:
            rem_repr = rem_repr.replace('"', '\\"')

        # then reinsert prefix from start of token
        matched.append('%s%s' % (token_prefix, rem_repr))
    return quote, token_start, matched
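
# A minimal illustration (added for clarity; not part of the original module),
# using the module-level DELIMS as the delimiter set:
#
#   >>> match_dict_keys(['abc', 'abd', b'abc'], "'ab", DELIMS)
#   ("'", 1, ['abc', 'abd'])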


def cursor_to_position(text:str, line:int, column:int)->int:
    """

    Convert the (line, column) position of the cursor in text to an offset in a
    string.

    Parameters
    ----------

    text : str
        The text in which to calculate the cursor offset
    line : int
        Line of the cursor; 0-indexed
    column : int
        Column of the cursor 0-indexed

    Return
    ------
    Position of the cursor in ``text``, 0-indexed.

    See Also
    --------
    position_to_cursor: reciprocal of this function

    """
    lines = text.split('\n')
    assert line <= len(lines), '{} <= {}'.format(str(line), str(len(lines)))

    return sum(len(l) + 1 for l in lines[:line]) + column

def position_to_cursor(text:str, offset:int)->(int, int):
    """
    Convert the position of the cursor in text (0-indexed) to a line
    number (0-indexed) and a column number (0-indexed) pair

    Position should be a valid position in ``text``.

    Parameters
    ----------

    text : str
        The text in which to calculate the cursor offset
    offset : int
        Position of the cursor in ``text``, 0-indexed.

    Return
    ------
    (line, column) : (int, int)
        Line of the cursor; 0-indexed, column of the cursor 0-indexed


    See Also
    --------
    cursor_to_position : reciprocal of this function


    """

    assert 0 < offset <= len(text) , "0 < %s <= %s" % (offset , len(text))

    before = text[:offset]
    blines = before.split('\n')  # ! splitlines trims trailing \n
    line = before.count('\n')
    col = len(blines[-1])
    return line, col
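
# A minimal round-trip illustration (added for clarity; not part of the
# original module) of the two reciprocal helpers above:
#
#   >>> cursor_to_position("ab\ncd", 1, 1)
#   4
#   >>> position_to_cursor("ab\ncd", 4)
#   (1, 1)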


def _safe_isinstance(obj, module, class_name):
    """Checks if obj is an instance of module.class_name if loaded
    """
    return (module in sys.modules and
            isinstance(obj, getattr(import_module(module), class_name)))


def back_unicode_name_matches(text):
    u"""Match unicode characters back to unicode name

    This does ``☃`` -> ``\\snowman``

    Note that snowman is not a valid python3 combining character but will be expanded,
    though it will not be recombined back into the snowman character by the
    completion machinery.

    Neither will this back-complete standard sequences like \\n, \\b ...

    Used on Python 3 only.
    """
    if len(text)<2:
        return u'', ()
    maybe_slash = text[-2]
    if maybe_slash != '\\':
        return u'', ()

    char = text[-1]
    # no expand on quote for completion in strings.
    # nor backcomplete standard ascii keys
    if char in string.ascii_letters or char in ['"',"'"]:
        return u'', ()
    try:
        unic = unicodedata.name(char)
        return '\\'+char,['\\'+unic]
    except KeyError:
        pass
    return u'', ()

def back_latex_name_matches(text:str):
    """Match latex characters back to unicode name

    This does ``\\ℵ`` -> ``\\aleph``

    Used on Python 3 only.
    """
    if len(text)<2:
        return u'', ()
    maybe_slash = text[-2]
    if maybe_slash != '\\':
        return u'', ()


    char = text[-1]
    # no expand on quote for completion in strings.
    # nor backcomplete standard ascii keys
    if char in string.ascii_letters or char in ['"',"'"]:
        return u'', ()
    try:
        latex = reverse_latex_symbol[char]
        # '\\' replaces the \ as well
        return '\\'+char,[latex]
    except KeyError:
        pass
    return u'', ()
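
# A minimal illustration (added for clarity; not part of the original module)
# of the two backward-completion helpers above:
#
#   >>> back_unicode_name_matches('\\☃')
#   ('\\☃', ['\\SNOWMAN'])
#   >>> back_latex_name_matches('\\ℵ')
#   ('\\ℵ', ['\\aleph'])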
925
925
926
926
927 class IPCompleter(Completer):
927 class IPCompleter(Completer):
928 """Extension of the completer class with IPython-specific features"""
928 """Extension of the completer class with IPython-specific features"""
929
929
930 @observe('greedy')
930 @observe('greedy')
931 def _greedy_changed(self, change):
931 def _greedy_changed(self, change):
932 """update the splitter and readline delims when greedy is changed"""
932 """update the splitter and readline delims when greedy is changed"""
933 if change['new']:
933 if change['new']:
934 self.splitter.delims = GREEDY_DELIMS
934 self.splitter.delims = GREEDY_DELIMS
935 else:
935 else:
936 self.splitter.delims = DELIMS
936 self.splitter.delims = DELIMS
937
937
938 merge_completions = Bool(True,
938 merge_completions = Bool(True,
939 help="""Whether to merge completion results into a single list
939 help="""Whether to merge completion results into a single list
940
940
941 If False, only the completion results from the first non-empty
941 If False, only the completion results from the first non-empty
942 completer will be returned.
942 completer will be returned.
943 """
943 """
944 ).tag(config=True)
944 ).tag(config=True)
945 omit__names = Enum((0,1,2), default_value=2,
945 omit__names = Enum((0,1,2), default_value=2,
946 help="""Instruct the completer to omit private method names
946 help="""Instruct the completer to omit private method names
947
947
948 Specifically, when completing on ``object.<tab>``.
948 Specifically, when completing on ``object.<tab>``.
949
949
950 When 2 [default]: all names that start with '_' will be excluded.
950 When 2 [default]: all names that start with '_' will be excluded.
951
951
952 When 1: all 'magic' names (``__foo__``) will be excluded.
952 When 1: all 'magic' names (``__foo__``) will be excluded.
953
953
954 When 0: nothing will be excluded.
954 When 0: nothing will be excluded.
955 """
955 """
956 ).tag(config=True)
956 ).tag(config=True)
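# A hedged configuration sketch: like any trait tagged config=True, this can
# be set from a profile's ipython_config.py via the standard traitlets config
# object ``c`` (an assumption here), e.g.:
#
#     c.IPCompleter.omit__names = 1   # hide only __dunder__ names on obj.<tab>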
957 limit_to__all__ = Bool(False,
957 limit_to__all__ = Bool(False,
958 help="""
958 help="""
959 DEPRECATED as of version 5.0.
959 DEPRECATED as of version 5.0.
960
960
961 Instruct the completer to use __all__ for the completion
961 Instruct the completer to use __all__ for the completion
962
962
963 Specifically, when completing on ``object.<tab>``.
963 Specifically, when completing on ``object.<tab>``.
964
964
965 When True: only those names in obj.__all__ will be included.
965 When True: only those names in obj.__all__ will be included.
966
966
967 When False [default]: the __all__ attribute is ignored
967 When False [default]: the __all__ attribute is ignored
968 """,
968 """,
969 ).tag(config=True)
969 ).tag(config=True)
970
970
971 @observe('limit_to__all__')
971 @observe('limit_to__all__')
972 def _limit_to_all_changed(self, change):
972 def _limit_to_all_changed(self, change):
973 warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
973 warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
974 'value has been deprecated since IPython 5.0, will be made to have '
974 'value has been deprecated since IPython 5.0, will be made to have '
975 'no effect and then removed in a future version of IPython.',
975 'no effect and then removed in a future version of IPython.',
976 UserWarning)
976 UserWarning)
977
977
978 def __init__(self, shell=None, namespace=None, global_namespace=None,
978 def __init__(self, shell=None, namespace=None, global_namespace=None,
979 use_readline=_deprecation_readline_sentinel, config=None, **kwargs):
979 use_readline=_deprecation_readline_sentinel, config=None, **kwargs):
980 """IPCompleter() -> completer
980 """IPCompleter() -> completer
981
981
982 Return a completer object.
982 Return a completer object.
983
983
984 Parameters
984 Parameters
985 ----------
985 ----------
986
986
987 shell
987 shell
988 a pointer to the ipython shell itself. This is needed
988 a pointer to the ipython shell itself. This is needed
989 because this completer knows about magic functions, and those can
989 because this completer knows about magic functions, and those can
990 only be accessed via the ipython instance.
990 only be accessed via the ipython instance.
991
991
992 namespace : dict, optional
992 namespace : dict, optional
993 an optional dict where completions are performed.
993 an optional dict where completions are performed.
994
994
995 global_namespace : dict, optional
995 global_namespace : dict, optional
996 secondary optional dict for completions, to
996 secondary optional dict for completions, to
997 handle cases (such as IPython embedded inside functions) where
997 handle cases (such as IPython embedded inside functions) where
998 both Python scopes are visible.
998 both Python scopes are visible.
999
999
1000 use_readline : bool, optional
1000 use_readline : bool, optional
1001 DEPRECATED, ignored since IPython 6.0, will have no effect
1001 DEPRECATED, ignored since IPython 6.0, will have no effect
1002 """
1002 """
1003
1003
1004 self.magic_escape = ESC_MAGIC
1004 self.magic_escape = ESC_MAGIC
1005 self.splitter = CompletionSplitter()
1005 self.splitter = CompletionSplitter()
1006
1006
1007 if use_readline is not _deprecation_readline_sentinel:
1007 if use_readline is not _deprecation_readline_sentinel:
1008 warnings.warn('The `use_readline` parameter is deprecated and ignored since IPython 6.0.',
1008 warnings.warn('The `use_readline` parameter is deprecated and ignored since IPython 6.0.',
1009 DeprecationWarning, stacklevel=2)
1009 DeprecationWarning, stacklevel=2)
1010
1010
1011 # _greedy_changed() depends on splitter and readline being defined:
1011 # _greedy_changed() depends on splitter and readline being defined:
1012 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
1012 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
1013 config=config, **kwargs)
1013 config=config, **kwargs)
1014
1014
1015 # List where completion matches will be stored
1015 # List where completion matches will be stored
1016 self.matches = []
1016 self.matches = []
1017 self.shell = shell
1017 self.shell = shell
1018 # Regexp to split filenames with spaces in them
1018 # Regexp to split filenames with spaces in them
1019 self.space_name_re = re.compile(r'([^\\] )')
1019 self.space_name_re = re.compile(r'([^\\] )')
1020 # Hold a local ref. to glob.glob for speed
1020 # Hold a local ref. to glob.glob for speed
1021 self.glob = glob.glob
1021 self.glob = glob.glob
1022
1022
1023 # Determine if we are running on 'dumb' terminals, like (X)Emacs
1023 # Determine if we are running on 'dumb' terminals, like (X)Emacs
1024 # buffers, to avoid completion problems.
1024 # buffers, to avoid completion problems.
1025 term = os.environ.get('TERM','xterm')
1025 term = os.environ.get('TERM','xterm')
1026 self.dumb_terminal = term in ['dumb','emacs']
1026 self.dumb_terminal = term in ['dumb','emacs']
1027
1027
1028 # Special handling of backslashes needed in win32 platforms
1028 # Special handling of backslashes needed in win32 platforms
1029 if sys.platform == "win32":
1029 if sys.platform == "win32":
1030 self.clean_glob = self._clean_glob_win32
1030 self.clean_glob = self._clean_glob_win32
1031 else:
1031 else:
1032 self.clean_glob = self._clean_glob
1032 self.clean_glob = self._clean_glob
1033
1033
1034 #regexp to parse docstring for function signature
1034 #regexp to parse docstring for function signature
1035 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1035 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1036 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1036 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1037 #use this if positional argument name is also needed
1037 #use this if positional argument name is also needed
1038 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
1038 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
1039
1039
1040 # All active matcher routines for completion
1040 # All active matcher routines for completion
1041 self.matchers = [
1041 self.matchers = [
1042 self.python_matches,
1042 self.python_matches,
1043 self.file_matches,
1043 self.file_matches,
1044 self.magic_matches,
1044 self.magic_matches,
1045 self.python_func_kw_matches,
1045 self.python_func_kw_matches,
1046 self.dict_key_matches,
1046 self.dict_key_matches,
1047 ]
1047 ]
1048
1048
1049 # This is set externally by InteractiveShell
1049 # This is set externally by InteractiveShell
1050 self.custom_completers = None
1050 self.custom_completers = None
1051
1051
1052 def all_completions(self, text):
1052 def all_completions(self, text):
1053 """
1053 """
1054 Wrapper around the complete method for the benefit of emacs.
1054 Wrapper around the complete method for the benefit of emacs.
1055 """
1055 """
1056 return self.complete(text)[1]
1056 return self.complete(text)[1]
1057
1057
1058 def _clean_glob(self, text):
1058 def _clean_glob(self, text):
1059 return self.glob("%s*" % text)
1059 return self.glob("%s*" % text)
1060
1060
1061 def _clean_glob_win32(self,text):
1061 def _clean_glob_win32(self,text):
1062 return [f.replace("\\","/")
1062 return [f.replace("\\","/")
1063 for f in self.glob("%s*" % text)]
1063 for f in self.glob("%s*" % text)]
1064
1064
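# Illustrative behaviour of the glob helpers (a sketch; the file names shown
# are hypothetical and the actual results depend on what is on disk):
#
#     >>> completer._clean_glob('./READ')                  # doctest: +SKIP
#     ['./README.rst']
#     >>> completer._clean_glob_win32('C:\\tmp\\READ')     # doctest: +SKIP
#     ['C:/tmp/README.txt']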
1065 def file_matches(self, text):
1065 def file_matches(self, text):
1066 """Match filenames, expanding ~USER type strings.
1066 """Match filenames, expanding ~USER type strings.
1067
1067
1068 Most of the seemingly convoluted logic in this completer is an
1068 Most of the seemingly convoluted logic in this completer is an
1069 attempt to handle filenames with spaces in them. And yet it's not
1069 attempt to handle filenames with spaces in them. And yet it's not
1070 quite perfect, because Python's readline doesn't expose all of the
1070 quite perfect, because Python's readline doesn't expose all of the
1071 GNU readline details needed for this to be done correctly.
1071 GNU readline details needed for this to be done correctly.
1072
1072
1073 For a filename with a space in it, the printed completions will be
1073 For a filename with a space in it, the printed completions will be
1074 only the parts after what's already been typed (instead of the
1074 only the parts after what's already been typed (instead of the
1075 full completions, as is normally done). I don't think with the
1075 full completions, as is normally done). I don't think with the
1076 current (as of Python 2.3) Python readline it's possible to do
1076 current (as of Python 2.3) Python readline it's possible to do
1077 better."""
1077 better."""
1078
1078
1079 # chars that require escaping with backslash - i.e. chars
1079 # chars that require escaping with backslash - i.e. chars
1080 # that readline treats incorrectly as delimiters, but we
1080 # that readline treats incorrectly as delimiters, but we
1081 # don't want to treat as delimiters in filename matching
1081 # don't want to treat as delimiters in filename matching
1082 # when escaped with backslash
1082 # when escaped with backslash
1083 if text.startswith('!'):
1083 if text.startswith('!'):
1084 text = text[1:]
1084 text = text[1:]
1085 text_prefix = u'!'
1085 text_prefix = u'!'
1086 else:
1086 else:
1087 text_prefix = u''
1087 text_prefix = u''
1088
1088
1089 text_until_cursor = self.text_until_cursor
1089 text_until_cursor = self.text_until_cursor
1090 # track strings with open quotes
1090 # track strings with open quotes
1091 open_quotes = has_open_quotes(text_until_cursor)
1091 open_quotes = has_open_quotes(text_until_cursor)
1092
1092
1093 if '(' in text_until_cursor or '[' in text_until_cursor:
1093 if '(' in text_until_cursor or '[' in text_until_cursor:
1094 lsplit = text
1094 lsplit = text
1095 else:
1095 else:
1096 try:
1096 try:
1097 # arg_split ~ shlex.split, but with unicode bugs fixed by us
1097 # arg_split ~ shlex.split, but with unicode bugs fixed by us
1098 lsplit = arg_split(text_until_cursor)[-1]
1098 lsplit = arg_split(text_until_cursor)[-1]
1099 except ValueError:
1099 except ValueError:
1100 # typically an unmatched ", or backslash without escaped char.
1100 # typically an unmatched ", or backslash without escaped char.
1101 if open_quotes:
1101 if open_quotes:
1102 lsplit = text_until_cursor.split(open_quotes)[-1]
1102 lsplit = text_until_cursor.split(open_quotes)[-1]
1103 else:
1103 else:
1104 return []
1104 return []
1105 except IndexError:
1105 except IndexError:
1106 # tab pressed on empty line
1106 # tab pressed on empty line
1107 lsplit = ""
1107 lsplit = ""
1108
1108
1109 if not open_quotes and lsplit != protect_filename(lsplit):
1109 if not open_quotes and lsplit != protect_filename(lsplit):
1110 # if protectables are found, do matching on the whole escaped name
1110 # if protectables are found, do matching on the whole escaped name
1111 has_protectables = True
1111 has_protectables = True
1112 text0,text = text,lsplit
1112 text0,text = text,lsplit
1113 else:
1113 else:
1114 has_protectables = False
1114 has_protectables = False
1115 text = os.path.expanduser(text)
1115 text = os.path.expanduser(text)
1116
1116
1117 if text == "":
1117 if text == "":
1118 return [text_prefix + cast_unicode_py2(protect_filename(f)) for f in self.glob("*")]
1118 return [text_prefix + cast_unicode_py2(protect_filename(f)) for f in self.glob("*")]
1119
1119
1120 # Compute the matches from the filesystem
1120 # Compute the matches from the filesystem
1121 if sys.platform == 'win32':
1121 if sys.platform == 'win32':
1122 m0 = self.clean_glob(text)
1122 m0 = self.clean_glob(text)
1123 else:
1123 else:
1124 m0 = self.clean_glob(text.replace('\\', ''))
1124 m0 = self.clean_glob(text.replace('\\', ''))
1125
1125
1126 if has_protectables:
1126 if has_protectables:
1127 # If we had protectables, we need to revert our changes to the
1127 # If we had protectables, we need to revert our changes to the
1128 # beginning of filename so that we don't double-write the part
1128 # beginning of filename so that we don't double-write the part
1129 # of the filename we have so far
1129 # of the filename we have so far
1130 len_lsplit = len(lsplit)
1130 len_lsplit = len(lsplit)
1131 matches = [text_prefix + text0 +
1131 matches = [text_prefix + text0 +
1132 protect_filename(f[len_lsplit:]) for f in m0]
1132 protect_filename(f[len_lsplit:]) for f in m0]
1133 else:
1133 else:
1134 if open_quotes:
1134 if open_quotes:
1135 # if we have a string with an open quote, we don't need to
1135 # if we have a string with an open quote, we don't need to
1136 # protect the names at all (and we _shouldn't_, as it
1136 # protect the names beyond the quote (and we _shouldn't_, as
1137 # would cause bugs when the filesystem call is made).
1137 # it would cause bugs when the filesystem call is made).
1138 matches = m0
1138 matches = m0 if sys.platform == "win32" else\
1139 [protect_filename(f, open_quotes) for f in m0]
1139 else:
1140 else:
1140 matches = [text_prefix +
1141 matches = [text_prefix +
1141 protect_filename(f) for f in m0]
1142 protect_filename(f) for f in m0]
1142
1143
1143 # Mark directories in input list by appending '/' to their names.
1144 # Mark directories in input list by appending '/' to their names.
1144 return [cast_unicode_py2(x+'/') if os.path.isdir(x) else x for x in matches]
1145 return [cast_unicode_py2(x+'/') if os.path.isdir(x) else x for x in matches]
1145
1146
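# A hedged sketch of the open-quote branch above: on non-Windows platforms,
# filenames matched while a quote is still open are now passed through
# protect_filename(f, open_quotes), so a file whose name contains that quote
# character (say "it's here.txt", completed after typing open('it ) is offered
# with the embedded quote escaped rather than raw; on win32 the matches are
# returned unmodified, as before.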
1146 def magic_matches(self, text):
1147 def magic_matches(self, text):
1147 """Match magics"""
1148 """Match magics"""
1148 # Get all shell magics now rather than statically, so magics loaded at
1149 # Get all shell magics now rather than statically, so magics loaded at
1149 # runtime show up too.
1150 # runtime show up too.
1150 lsm = self.shell.magics_manager.lsmagic()
1151 lsm = self.shell.magics_manager.lsmagic()
1151 line_magics = lsm['line']
1152 line_magics = lsm['line']
1152 cell_magics = lsm['cell']
1153 cell_magics = lsm['cell']
1153 pre = self.magic_escape
1154 pre = self.magic_escape
1154 pre2 = pre+pre
1155 pre2 = pre+pre
1155
1156
1156 # Completion logic:
1157 # Completion logic:
1157 # - user gives %%: only do cell magics
1158 # - user gives %%: only do cell magics
1158 # - user gives %: do both line and cell magics
1159 # - user gives %: do both line and cell magics
1159 # - no prefix: do both
1160 # - no prefix: do both
1160 # In other words, line magics are skipped if the user gives %% explicitly
1161 # In other words, line magics are skipped if the user gives %% explicitly
1161 bare_text = text.lstrip(pre)
1162 bare_text = text.lstrip(pre)
1162 comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
1163 comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
1163 if not text.startswith(pre2):
1164 if not text.startswith(pre2):
1164 comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
1165 comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
1165 return [cast_unicode_py2(c) for c in comp]
1166 return [cast_unicode_py2(c) for c in comp]
1166
1167
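# Illustrative behaviour of magic_matches (a sketch; ``completer`` stands for
# an IPCompleter instance and the exact list depends on which magics are
# registered in the running shell):
#
#     >>> completer.magic_matches('%%ti')    # doctest: +SKIP
#     ['%%time', '%%timeit']
#     >>> completer.magic_matches('%ti')     # doctest: +SKIP
#     ['%%time', '%%timeit', '%time', '%timeit']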
1167 def _jedi_matches(self, cursor_column:int, cursor_line:int, text:str):
1168 def _jedi_matches(self, cursor_column:int, cursor_line:int, text:str):
1168 """
1169 """
1169
1170
1170 Return a list of :any:`jedi.api.Completions` object from a ``text`` and
1171 Return a list of :any:`jedi.api.Completions` object from a ``text`` and
1171 cursor position.
1172 cursor position.
1172
1173
1173 Parameters
1174 Parameters
1174 ----------
1175 ----------
1175 cursor_column : int
1176 cursor_column : int
1176 column position of the cursor in ``text``, 0-indexed.
1177 column position of the cursor in ``text``, 0-indexed.
1177 cursor_line : int
1178 cursor_line : int
1178 line position of the cursor in ``text``, 0-indexed
1179 line position of the cursor in ``text``, 0-indexed
1179 text : str
1180 text : str
1180 text to complete
1181 text to complete
1181
1182
1182 Debugging
1183 Debugging
1183 ---------
1184 ---------
1184
1185
1185 If ``IPCompleter.debug`` is ``True``, this may return a :any:`_FakeJediCompletion`
1186 If ``IPCompleter.debug`` is ``True``, this may return a :any:`_FakeJediCompletion`
1186 object containing a string with the Jedi debug information attached.
1187 object containing a string with the Jedi debug information attached.
1187 """
1188 """
1188 namespaces = [self.namespace]
1189 namespaces = [self.namespace]
1189 if self.global_namespace is not None:
1190 if self.global_namespace is not None:
1190 namespaces.append(self.global_namespace)
1191 namespaces.append(self.global_namespace)
1191
1192
1192 completion_filter = lambda x:x
1193 completion_filter = lambda x:x
1193 # cursor_pos is an int, jedi wants line and column
1194 # cursor_pos is an int, jedi wants line and column
1194 offset = cursor_to_position(text, cursor_line, cursor_column)
1195 offset = cursor_to_position(text, cursor_line, cursor_column)
1195 # filter output if we are completing for object members
1196 # filter output if we are completing for object members
1196 if offset:
1197 if offset:
1197 pre = text[offset-1]
1198 pre = text[offset-1]
1198 if pre == '.':
1199 if pre == '.':
1199 if self.omit__names == 2:
1200 if self.omit__names == 2:
1200 completion_filter = lambda c:not c.name.startswith('_')
1201 completion_filter = lambda c:not c.name.startswith('_')
1201 elif self.omit__names == 1:
1202 elif self.omit__names == 1:
1202 completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
1203 completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
1203 elif self.omit__names == 0:
1204 elif self.omit__names == 0:
1204 completion_filter = lambda x:x
1205 completion_filter = lambda x:x
1205 else:
1206 else:
1206 raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
1207 raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
1207
1208
1208 interpreter = jedi.Interpreter(
1209 interpreter = jedi.Interpreter(
1209 text, namespaces, column=cursor_column, line=cursor_line + 1)
1210 text, namespaces, column=cursor_column, line=cursor_line + 1)
1210
1211
1211 try_jedi = False
1212 try_jedi = False
1212
1213
1213 try:
1214 try:
1214 # should we check the type of the node is Error ?
1215 # should we check the type of the node is Error ?
1215 from jedi.parser.tree import ErrorLeaf
1216 from jedi.parser.tree import ErrorLeaf
1216 next_to_last_tree = interpreter._get_module().tree_node.children[-2]
1217 next_to_last_tree = interpreter._get_module().tree_node.children[-2]
1217 completing_string = False
1218 completing_string = False
1218 if isinstance(next_to_last_tree, ErrorLeaf):
1219 if isinstance(next_to_last_tree, ErrorLeaf):
1219 completing_string = interpreter._get_module().tree_node.children[-2].value[0] in {'"', "'"}
1220 completing_string = interpreter._get_module().tree_node.children[-2].value[0] in {'"', "'"}
1220 # if we are in a string jedi is likely not the right candidate for
1221 # if we are in a string jedi is likely not the right candidate for
1221 # now. Skip it.
1222 # now. Skip it.
1222 try_jedi = not completing_string
1223 try_jedi = not completing_string
1223 except Exception as e:
1224 except Exception as e:
1224 # many things can go wrong; we are using a private API, just don't crash.
1225 # many things can go wrong; we are using a private API, just don't crash.
1225 if self.debug:
1226 if self.debug:
1226 print("Error detecting if completing a non-finished string :", e, '|')
1227 print("Error detecting if completing a non-finished string :", e, '|')
1227
1228
1228 if not try_jedi:
1229 if not try_jedi:
1229 return []
1230 return []
1230 try:
1231 try:
1231 return filter(completion_filter, interpreter.completions())
1232 return filter(completion_filter, interpreter.completions())
1232 except Exception as e:
1233 except Exception as e:
1233 if self.debug:
1234 if self.debug:
1234 return [_FakeJediCompletion('Oops Jedi has crashed, please report a bug with the following:\n"""\n%s\n"""' % (e))]
1235 return [_FakeJediCompletion('Oops Jedi has crashed, please report a bug with the following:\n"""\n%s\n"""' % (e))]
1235 else:
1236 else:
1236 return []
1237 return []
1237
1238
1238 def python_matches(self, text):
1239 def python_matches(self, text):
1239 """Match attributes or global python names"""
1240 """Match attributes or global python names"""
1240 if "." in text:
1241 if "." in text:
1241 try:
1242 try:
1242 matches = self.attr_matches(text)
1243 matches = self.attr_matches(text)
1243 if text.endswith('.') and self.omit__names:
1244 if text.endswith('.') and self.omit__names:
1244 if self.omit__names == 1:
1245 if self.omit__names == 1:
1245 # true if txt is _not_ a __ name, false otherwise:
1246 # true if txt is _not_ a __ name, false otherwise:
1246 no__name = (lambda txt:
1247 no__name = (lambda txt:
1247 re.match(r'.*\.__.*?__',txt) is None)
1248 re.match(r'.*\.__.*?__',txt) is None)
1248 else:
1249 else:
1249 # true if txt is _not_ a _ name, false otherwise:
1250 # true if txt is _not_ a _ name, false otherwise:
1250 no__name = (lambda txt:
1251 no__name = (lambda txt:
1251 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
1252 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
1252 matches = filter(no__name, matches)
1253 matches = filter(no__name, matches)
1253 except NameError:
1254 except NameError:
1254 # catches <undefined attributes>.<tab>
1255 # catches <undefined attributes>.<tab>
1255 matches = []
1256 matches = []
1256 else:
1257 else:
1257 matches = self.global_matches(text)
1258 matches = self.global_matches(text)
1258 return matches
1259 return matches
1259
1260
1260 def _default_arguments_from_docstring(self, doc):
1261 def _default_arguments_from_docstring(self, doc):
1261 """Parse the first line of docstring for call signature.
1262 """Parse the first line of docstring for call signature.
1262
1263
1263 Docstring should be of the form 'min(iterable[, key=func])\n'.
1264 Docstring should be of the form 'min(iterable[, key=func])\n'.
1264 It can also parse cython docstring of the form
1265 It can also parse cython docstring of the form
1265 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
1266 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
1266 """
1267 """
1267 if doc is None:
1268 if doc is None:
1268 return []
1269 return []
1269
1270
1270 # care only about the first line
1271 # care only about the first line
1271 line = doc.lstrip().splitlines()[0]
1272 line = doc.lstrip().splitlines()[0]
1272
1273
1273 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1274 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1274 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
1275 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
1275 sig = self.docstring_sig_re.search(line)
1276 sig = self.docstring_sig_re.search(line)
1276 if sig is None:
1277 if sig is None:
1277 return []
1278 return []
1278 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
1279 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
1279 sig = sig.groups()[0].split(',')
1280 sig = sig.groups()[0].split(',')
1280 ret = []
1281 ret = []
1281 for s in sig:
1282 for s in sig:
1282 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1283 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1283 ret += self.docstring_kwd_re.findall(s)
1284 ret += self.docstring_kwd_re.findall(s)
1284 return ret
1285 return ret
1285
1286
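# Illustrative behaviour of _default_arguments_from_docstring (a sketch): only
# names followed by '=' are captured by docstring_kwd_re, so the positional
# 'iterable' is dropped:
#
#     >>> completer._default_arguments_from_docstring('min(iterable[, key=func])\n')
#     ['key']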
1286 def _default_arguments(self, obj):
1287 def _default_arguments(self, obj):
1287 """Return the list of default arguments of obj if it is callable,
1288 """Return the list of default arguments of obj if it is callable,
1288 or empty list otherwise."""
1289 or empty list otherwise."""
1289 call_obj = obj
1290 call_obj = obj
1290 ret = []
1291 ret = []
1291 if inspect.isbuiltin(obj):
1292 if inspect.isbuiltin(obj):
1292 pass
1293 pass
1293 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
1294 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
1294 if inspect.isclass(obj):
1295 if inspect.isclass(obj):
1295 # for cython embedsignature=True the constructor docstring
1296 # for cython embedsignature=True the constructor docstring
1296 #belongs to the object itself not __init__
1297 #belongs to the object itself not __init__
1297 ret += self._default_arguments_from_docstring(
1298 ret += self._default_arguments_from_docstring(
1298 getattr(obj, '__doc__', ''))
1299 getattr(obj, '__doc__', ''))
1299 # for classes, check for __init__,__new__
1300 # for classes, check for __init__,__new__
1300 call_obj = (getattr(obj, '__init__', None) or
1301 call_obj = (getattr(obj, '__init__', None) or
1301 getattr(obj, '__new__', None))
1302 getattr(obj, '__new__', None))
1302 # for all others, check if they are __call__able
1303 # for all others, check if they are __call__able
1303 elif hasattr(obj, '__call__'):
1304 elif hasattr(obj, '__call__'):
1304 call_obj = obj.__call__
1305 call_obj = obj.__call__
1305 ret += self._default_arguments_from_docstring(
1306 ret += self._default_arguments_from_docstring(
1306 getattr(call_obj, '__doc__', ''))
1307 getattr(call_obj, '__doc__', ''))
1307
1308
1308 _keeps = (inspect.Parameter.KEYWORD_ONLY,
1309 _keeps = (inspect.Parameter.KEYWORD_ONLY,
1309 inspect.Parameter.POSITIONAL_OR_KEYWORD)
1310 inspect.Parameter.POSITIONAL_OR_KEYWORD)
1310
1311
1311 try:
1312 try:
1312 sig = inspect.signature(call_obj)
1313 sig = inspect.signature(call_obj)
1313 ret.extend(k for k, v in sig.parameters.items() if
1314 ret.extend(k for k, v in sig.parameters.items() if
1314 v.kind in _keeps)
1315 v.kind in _keeps)
1315 except ValueError:
1316 except ValueError:
1316 pass
1317 pass
1317
1318
1318 return list(set(ret))
1319 return list(set(ret))
1319
1320
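# Illustrative behaviour of _default_arguments (a sketch; ``completer`` is an
# IPCompleter instance, and the result is built from a set, so the order is
# not guaranteed):
#
#     >>> def f(a, b=1, *, c=2, **kw):
#     ...     pass
#     >>> sorted(completer._default_arguments(f))
#     ['a', 'b', 'c']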
1320 def python_func_kw_matches(self,text):
1321 def python_func_kw_matches(self,text):
1321 """Match named parameters (kwargs) of the last open function"""
1322 """Match named parameters (kwargs) of the last open function"""
1322
1323
1323 if "." in text: # a parameter cannot be dotted
1324 if "." in text: # a parameter cannot be dotted
1324 return []
1325 return []
1325 try: regexp = self.__funcParamsRegex
1326 try: regexp = self.__funcParamsRegex
1326 except AttributeError:
1327 except AttributeError:
1327 regexp = self.__funcParamsRegex = re.compile(r'''
1328 regexp = self.__funcParamsRegex = re.compile(r'''
1328 '.*?(?<!\\)' | # single quoted strings or
1329 '.*?(?<!\\)' | # single quoted strings or
1329 ".*?(?<!\\)" | # double quoted strings or
1330 ".*?(?<!\\)" | # double quoted strings or
1330 \w+ | # identifier
1331 \w+ | # identifier
1331 \S # other characters
1332 \S # other characters
1332 ''', re.VERBOSE | re.DOTALL)
1333 ''', re.VERBOSE | re.DOTALL)
1333 # 1. find the nearest identifier that comes before an unclosed
1334 # 1. find the nearest identifier that comes before an unclosed
1334 # parenthesis before the cursor
1335 # parenthesis before the cursor
1335 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
1336 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
1336 tokens = regexp.findall(self.text_until_cursor)
1337 tokens = regexp.findall(self.text_until_cursor)
1337 iterTokens = reversed(tokens); openPar = 0
1338 iterTokens = reversed(tokens); openPar = 0
1338
1339
1339 for token in iterTokens:
1340 for token in iterTokens:
1340 if token == ')':
1341 if token == ')':
1341 openPar -= 1
1342 openPar -= 1
1342 elif token == '(':
1343 elif token == '(':
1343 openPar += 1
1344 openPar += 1
1344 if openPar > 0:
1345 if openPar > 0:
1345 # found the last unclosed parenthesis
1346 # found the last unclosed parenthesis
1346 break
1347 break
1347 else:
1348 else:
1348 return []
1349 return []
1349 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
1350 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
1350 ids = []
1351 ids = []
1351 isId = re.compile(r'\w+$').match
1352 isId = re.compile(r'\w+$').match
1352
1353
1353 while True:
1354 while True:
1354 try:
1355 try:
1355 ids.append(next(iterTokens))
1356 ids.append(next(iterTokens))
1356 if not isId(ids[-1]):
1357 if not isId(ids[-1]):
1357 ids.pop(); break
1358 ids.pop(); break
1358 if not next(iterTokens) == '.':
1359 if not next(iterTokens) == '.':
1359 break
1360 break
1360 except StopIteration:
1361 except StopIteration:
1361 break
1362 break
1362
1363
1363 # Find all named arguments already assigned to, so as to avoid suggesting
1364 # Find all named arguments already assigned to, so as to avoid suggesting
1364 # them again
1365 # them again
1365 usedNamedArgs = set()
1366 usedNamedArgs = set()
1366 par_level = -1
1367 par_level = -1
1367 for token, next_token in zip(tokens, tokens[1:]):
1368 for token, next_token in zip(tokens, tokens[1:]):
1368 if token == '(':
1369 if token == '(':
1369 par_level += 1
1370 par_level += 1
1370 elif token == ')':
1371 elif token == ')':
1371 par_level -= 1
1372 par_level -= 1
1372
1373
1373 if par_level != 0:
1374 if par_level != 0:
1374 continue
1375 continue
1375
1376
1376 if next_token != '=':
1377 if next_token != '=':
1377 continue
1378 continue
1378
1379
1379 usedNamedArgs.add(token)
1380 usedNamedArgs.add(token)
1380
1381
1381 # lookup the candidate callable matches either using global_matches
1382 # lookup the candidate callable matches either using global_matches
1382 # or attr_matches for dotted names
1383 # or attr_matches for dotted names
1383 if len(ids) == 1:
1384 if len(ids) == 1:
1384 callableMatches = self.global_matches(ids[0])
1385 callableMatches = self.global_matches(ids[0])
1385 else:
1386 else:
1386 callableMatches = self.attr_matches('.'.join(ids[::-1]))
1387 callableMatches = self.attr_matches('.'.join(ids[::-1]))
1387 argMatches = []
1388 argMatches = []
1388 for callableMatch in callableMatches:
1389 for callableMatch in callableMatches:
1389 try:
1390 try:
1390 namedArgs = self._default_arguments(eval(callableMatch,
1391 namedArgs = self._default_arguments(eval(callableMatch,
1391 self.namespace))
1392 self.namespace))
1392 except:
1393 except:
1393 continue
1394 continue
1394
1395
1395 # Remove used named arguments from the list, no need to show twice
1396 # Remove used named arguments from the list, no need to show twice
1396 for namedArg in set(namedArgs) - usedNamedArgs:
1397 for namedArg in set(namedArgs) - usedNamedArgs:
1397 if namedArg.startswith(text):
1398 if namedArg.startswith(text):
1398 argMatches.append(u"%s=" %namedArg)
1399 argMatches.append(u"%s=" %namedArg)
1399 return argMatches
1400 return argMatches
1400
1401
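# Illustrative behaviour of python_func_kw_matches (a sketch; assumes ``plot``
# is defined in the completer's namespace and that self.text_until_cursor
# currently holds the partial call):
#
#     def plot(color='b', linewidth=1): ...
#     # with text_until_cursor == 'plot(col'
#     completer.python_func_kw_matches('col')   # -> ['color=']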
1401 def dict_key_matches(self, text):
1402 def dict_key_matches(self, text):
1402 "Match string keys in a dictionary, after e.g. 'foo[' "
1403 "Match string keys in a dictionary, after e.g. 'foo[' "
1403 def get_keys(obj):
1404 def get_keys(obj):
1404 # Objects can define their own completions by defining an
1405 # Objects can define their own completions by defining an
1405 # _ipython_key_completions_() method.
1406 # _ipython_key_completions_() method.
1406 method = get_real_method(obj, '_ipython_key_completions_')
1407 method = get_real_method(obj, '_ipython_key_completions_')
1407 if method is not None:
1408 if method is not None:
1408 return method()
1409 return method()
1409
1410
1410 # Special case some common in-memory dict-like types
1411 # Special case some common in-memory dict-like types
1411 if isinstance(obj, dict) or\
1412 if isinstance(obj, dict) or\
1412 _safe_isinstance(obj, 'pandas', 'DataFrame'):
1413 _safe_isinstance(obj, 'pandas', 'DataFrame'):
1413 try:
1414 try:
1414 return list(obj.keys())
1415 return list(obj.keys())
1415 except Exception:
1416 except Exception:
1416 return []
1417 return []
1417 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
1418 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
1418 _safe_isinstance(obj, 'numpy', 'void'):
1419 _safe_isinstance(obj, 'numpy', 'void'):
1419 return obj.dtype.names or []
1420 return obj.dtype.names or []
1420 return []
1421 return []
1421
1422
1422 try:
1423 try:
1423 regexps = self.__dict_key_regexps
1424 regexps = self.__dict_key_regexps
1424 except AttributeError:
1425 except AttributeError:
1425 dict_key_re_fmt = r'''(?x)
1426 dict_key_re_fmt = r'''(?x)
1426 ( # match dict-referring expression wrt greedy setting
1427 ( # match dict-referring expression wrt greedy setting
1427 %s
1428 %s
1428 )
1429 )
1429 \[ # open bracket
1430 \[ # open bracket
1430 \s* # and optional whitespace
1431 \s* # and optional whitespace
1431 ([uUbB]? # string prefix (r not handled)
1432 ([uUbB]? # string prefix (r not handled)
1432 (?: # unclosed string
1433 (?: # unclosed string
1433 '(?:[^']|(?<!\\)\\')*
1434 '(?:[^']|(?<!\\)\\')*
1434 |
1435 |
1435 "(?:[^"]|(?<!\\)\\")*
1436 "(?:[^"]|(?<!\\)\\")*
1436 )
1437 )
1437 )?
1438 )?
1438 $
1439 $
1439 '''
1440 '''
1440 regexps = self.__dict_key_regexps = {
1441 regexps = self.__dict_key_regexps = {
1441 False: re.compile(dict_key_re_fmt % '''
1442 False: re.compile(dict_key_re_fmt % '''
1442 # identifiers separated by .
1443 # identifiers separated by .
1443 (?!\d)\w+
1444 (?!\d)\w+
1444 (?:\.(?!\d)\w+)*
1445 (?:\.(?!\d)\w+)*
1445 '''),
1446 '''),
1446 True: re.compile(dict_key_re_fmt % '''
1447 True: re.compile(dict_key_re_fmt % '''
1447 .+
1448 .+
1448 ''')
1449 ''')
1449 }
1450 }
1450
1451
1451 match = regexps[self.greedy].search(self.text_until_cursor)
1452 match = regexps[self.greedy].search(self.text_until_cursor)
1452 if match is None:
1453 if match is None:
1453 return []
1454 return []
1454
1455
1455 expr, prefix = match.groups()
1456 expr, prefix = match.groups()
1456 try:
1457 try:
1457 obj = eval(expr, self.namespace)
1458 obj = eval(expr, self.namespace)
1458 except Exception:
1459 except Exception:
1459 try:
1460 try:
1460 obj = eval(expr, self.global_namespace)
1461 obj = eval(expr, self.global_namespace)
1461 except Exception:
1462 except Exception:
1462 return []
1463 return []
1463
1464
1464 keys = get_keys(obj)
1465 keys = get_keys(obj)
1465 if not keys:
1466 if not keys:
1466 return keys
1467 return keys
1467 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
1468 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
1468 if not matches:
1469 if not matches:
1469 return matches
1470 return matches
1470
1471
1471 # get the cursor position of
1472 # get the cursor position of
1472 # - the text being completed
1473 # - the text being completed
1473 # - the start of the key text
1474 # - the start of the key text
1474 # - the start of the completion
1475 # - the start of the completion
1475 text_start = len(self.text_until_cursor) - len(text)
1476 text_start = len(self.text_until_cursor) - len(text)
1476 if prefix:
1477 if prefix:
1477 key_start = match.start(2)
1478 key_start = match.start(2)
1478 completion_start = key_start + token_offset
1479 completion_start = key_start + token_offset
1479 else:
1480 else:
1480 key_start = completion_start = match.end()
1481 key_start = completion_start = match.end()
1481
1482
1482 # grab the leading prefix, to make sure all completions start with `text`
1483 # grab the leading prefix, to make sure all completions start with `text`
1483 if text_start > key_start:
1484 if text_start > key_start:
1484 leading = ''
1485 leading = ''
1485 else:
1486 else:
1486 leading = text[text_start:completion_start]
1487 leading = text[text_start:completion_start]
1487
1488
1488 # the index of the `[` character
1489 # the index of the `[` character
1489 bracket_idx = match.end(1)
1490 bracket_idx = match.end(1)
1490
1491
1491 # append closing quote and bracket as appropriate
1492 # append closing quote and bracket as appropriate
1492 # this is *not* appropriate if the opening quote or bracket is outside
1493 # this is *not* appropriate if the opening quote or bracket is outside
1493 # the text given to this method
1494 # the text given to this method
1494 suf = ''
1495 suf = ''
1495 continuation = self.line_buffer[len(self.text_until_cursor):]
1496 continuation = self.line_buffer[len(self.text_until_cursor):]
1496 if key_start > text_start and closing_quote:
1497 if key_start > text_start and closing_quote:
1497 # quotes were opened inside text, maybe close them
1498 # quotes were opened inside text, maybe close them
1498 if continuation.startswith(closing_quote):
1499 if continuation.startswith(closing_quote):
1499 continuation = continuation[len(closing_quote):]
1500 continuation = continuation[len(closing_quote):]
1500 else:
1501 else:
1501 suf += closing_quote
1502 suf += closing_quote
1502 if bracket_idx > text_start:
1503 if bracket_idx > text_start:
1503 # brackets were opened inside text, maybe close them
1504 # brackets were opened inside text, maybe close them
1504 if not continuation.startswith(']'):
1505 if not continuation.startswith(']'):
1505 suf += ']'
1506 suf += ']'
1506
1507
1507 return [leading + k + suf for k in matches]
1508 return [leading + k + suf for k in matches]
1508
1509
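# Illustrative behaviour of dict_key_matches (a sketch; the exact strings
# returned also depend on how the splitter chopped ``text``): with
# d = {'spam': 1, 'eggs': 2} in the user namespace, typing d['sp<tab> offers
# the 'spam' key, and the closing quote and bracket are appended only when
# they are not already present after the cursor.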
1509 def unicode_name_matches(self, text):
1510 def unicode_name_matches(self, text):
1510 u"""Match Latex-like syntax for unicode characters base
1511 u"""Match Latex-like syntax for unicode characters base
1511 on the name of the character.
1512 on the name of the character.
1512
1513
1513 This does ``\\GREEK SMALL LETTER ETA`` -> ``η``
1514 This does ``\\GREEK SMALL LETTER ETA`` -> ``η``
1514
1515
1515 Works only on valid Python 3 identifiers, or on combining characters that
1516 Works only on valid Python 3 identifiers, or on combining characters that
1516 will combine to form a valid identifier.
1517 will combine to form a valid identifier.
1517
1518
1518 Used on Python 3 only.
1519 Used on Python 3 only.
1519 """
1520 """
1520 slashpos = text.rfind('\\')
1521 slashpos = text.rfind('\\')
1521 if slashpos > -1:
1522 if slashpos > -1:
1522 s = text[slashpos+1:]
1523 s = text[slashpos+1:]
1523 try :
1524 try :
1524 unic = unicodedata.lookup(s)
1525 unic = unicodedata.lookup(s)
1525 # allow combining chars
1526 # allow combining chars
1526 if ('a'+unic).isidentifier():
1527 if ('a'+unic).isidentifier():
1527 return '\\'+s,[unic]
1528 return '\\'+s,[unic]
1528 except KeyError:
1529 except KeyError:
1529 pass
1530 pass
1530 return u'', []
1531 return u'', []
1531
1532
1532
1533
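# Illustrative behaviour of unicode_name_matches, matching the docstring
# example (a sketch; ``completer`` is an IPCompleter instance):
#
#     >>> completer.unicode_name_matches('\\GREEK SMALL LETTER ETA')
#     ('\\GREEK SMALL LETTER ETA', ['η'])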
1533 def latex_matches(self, text):
1534 def latex_matches(self, text):
1534 u"""Match Latex syntax for unicode characters.
1535 u"""Match Latex syntax for unicode characters.
1535
1536
1536 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``α``
1537 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``α``
1537
1538
1538 Used on Python 3 only.
1539 Used on Python 3 only.
1539 """
1540 """
1540 slashpos = text.rfind('\\')
1541 slashpos = text.rfind('\\')
1541 if slashpos > -1:
1542 if slashpos > -1:
1542 s = text[slashpos:]
1543 s = text[slashpos:]
1543 if s in latex_symbols:
1544 if s in latex_symbols:
1544 # Try to complete a full latex symbol to unicode
1545 # Try to complete a full latex symbol to unicode
1545 # \\alpha -> α
1546 # \\alpha -> α
1546 return s, [latex_symbols[s]]
1547 return s, [latex_symbols[s]]
1547 else:
1548 else:
1548 # If a user has partially typed a latex symbol, give them
1549 # If a user has partially typed a latex symbol, give them
1549 # a full list of options \al -> [\aleph, \alpha]
1550 # a full list of options \al -> [\aleph, \alpha]
1550 matches = [k for k in latex_symbols if k.startswith(s)]
1551 matches = [k for k in latex_symbols if k.startswith(s)]
1551 return s, matches
1552 return s, matches
1552 return u'', []
1553 return u'', []
1553
1554
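# Illustrative behaviour of latex_matches (a sketch; the partial-match list
# depends on the contents of the latex_symbols table):
#
#     >>> completer.latex_matches('\\alpha')
#     ('\\alpha', ['α'])
#     >>> completer.latex_matches('\\al')     # doctest: +SKIP
#     ('\\al', ['\\aleph', '\\alpha', ...])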
1554 def dispatch_custom_completer(self, text):
1555 def dispatch_custom_completer(self, text):
1555 if not self.custom_completers:
1556 if not self.custom_completers:
1556 return
1557 return
1557
1558
1558 line = self.line_buffer
1559 line = self.line_buffer
1559 if not line.strip():
1560 if not line.strip():
1560 return None
1561 return None
1561
1562
1562 # Create a little structure to pass all the relevant information about
1563 # Create a little structure to pass all the relevant information about
1563 # the current completion to any custom completer.
1564 # the current completion to any custom completer.
1564 event = SimpleNamespace()
1565 event = SimpleNamespace()
1565 event.line = line
1566 event.line = line
1566 event.symbol = text
1567 event.symbol = text
1567 cmd = line.split(None,1)[0]
1568 cmd = line.split(None,1)[0]
1568 event.command = cmd
1569 event.command = cmd
1569 event.text_until_cursor = self.text_until_cursor
1570 event.text_until_cursor = self.text_until_cursor
1570
1571
1571 # for foo etc, try also to find completer for %foo
1572 # for foo etc, try also to find completer for %foo
1572 if not cmd.startswith(self.magic_escape):
1573 if not cmd.startswith(self.magic_escape):
1573 try_magic = self.custom_completers.s_matches(
1574 try_magic = self.custom_completers.s_matches(
1574 self.magic_escape + cmd)
1575 self.magic_escape + cmd)
1575 else:
1576 else:
1576 try_magic = []
1577 try_magic = []
1577
1578
1578 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1579 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1579 try_magic,
1580 try_magic,
1580 self.custom_completers.flat_matches(self.text_until_cursor)):
1581 self.custom_completers.flat_matches(self.text_until_cursor)):
1581 try:
1582 try:
1582 res = c(event)
1583 res = c(event)
1583 if res:
1584 if res:
1584 # first, try case sensitive match
1585 # first, try case sensitive match
1585 withcase = [cast_unicode_py2(r) for r in res if r.startswith(text)]
1586 withcase = [cast_unicode_py2(r) for r in res if r.startswith(text)]
1586 if withcase:
1587 if withcase:
1587 return withcase
1588 return withcase
1588 # if none, then case insensitive ones are ok too
1589 # if none, then case insensitive ones are ok too
1589 text_low = text.lower()
1590 text_low = text.lower()
1590 return [cast_unicode_py2(r) for r in res if r.lower().startswith(text_low)]
1591 return [cast_unicode_py2(r) for r in res if r.lower().startswith(text_low)]
1591 except TryNext:
1592 except TryNext:
1592 pass
1593 pass
1593
1594
1594 return None
1595 return None
1595
1596
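# A hedged sketch of a custom completer consumed by dispatch_custom_completer:
# the callable receives the ``event`` namespace built above (line, symbol,
# command, text_until_cursor) and returns a list of strings, or raises TryNext
# to pass. Registration is assumed to go through the shell's hook machinery
# (e.g. set_hook('complete_command', ...)), which is outside this method:
#
#     def apt_completer(self, event):
#         if event.symbol.startswith('i'):
#             return ['install']
#         return ['install', 'remove', 'update']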
1596 def completions(self, text: str, offset: int)->Iterator[Completion]:
1597 def completions(self, text: str, offset: int)->Iterator[Completion]:
1597 """
1598 """
1598 Returns an iterator over the possible completions
1599 Returns an iterator over the possible completions
1599
1600
1600 .. warning:: Unstable
1601 .. warning:: Unstable
1601
1602
1602 This function is unstable, API may change without warning.
1603 This function is unstable, API may change without warning.
1603 It will also raise unless used in the proper context manager.
1604 It will also raise unless used in the proper context manager.
1604
1605
1605 Parameters
1606 Parameters
1606 ----------
1607 ----------
1607
1608
1608 text:str
1609 text:str
1609 Full text of the current input, as a multi-line string.
1610 Full text of the current input, as a multi-line string.
1610 offset:int
1611 offset:int
1611 Integer representing the position of the cursor in ``text``. Offset
1612 Integer representing the position of the cursor in ``text``. Offset
1612 is 0-based indexed.
1613 is 0-based indexed.
1613
1614
1614 Yields
1615 Yields
1615 ------
1616 ------
1616 :any:`Completion` object
1617 :any:`Completion` object
1617
1618
1618
1619
1619 The cursor in a text can either be seen as being "in between"
1620 The cursor in a text can either be seen as being "in between"
1620 characters or "on" a character, depending on the interface visible to
1621 characters or "on" a character, depending on the interface visible to
1621 the user. For consistency, the cursor being "in between" characters X
1622 the user. For consistency, the cursor being "in between" characters X
1622 and Y is equivalent to the cursor being "on" character Y, that is to say
1623 and Y is equivalent to the cursor being "on" character Y, that is to say
1623 the character the cursor is on is considered as being after the cursor.
1624 the character the cursor is on is considered as being after the cursor.
1624
1625
1625 Combining characters may span more than one position in the
1626 Combining characters may span more than one position in the
1626 text.
1627 text.
1627
1628
1628
1629
1629 .. note::
1630 .. note::
1630
1631
1631 If ``IPCompleter.debug`` is :any:`True`, this will yield a ``--jedi/ipython--``
1632 If ``IPCompleter.debug`` is :any:`True`, this will yield a ``--jedi/ipython--``
1632 fake Completion token to distinguish completions returned by Jedi
1633 fake Completion token to distinguish completions returned by Jedi
1633 from the usual IPython completions.
1634 from the usual IPython completions.
1634
1635
1635 .. note::
1636 .. note::
1636
1637
1637 Completions are not completely deduplicated yet. If identical
1638 Completions are not completely deduplicated yet. If identical
1638 completions are coming from different sources this function does not
1639 completions are coming from different sources this function does not
1639 ensure that each completion object will only be present once.
1640 ensure that each completion object will only be present once.
1640 """
1641 """
1641 warnings.warn("_complete is a provisional API (as of IPython 6.0). "
1642 warnings.warn("_complete is a provisional API (as of IPython 6.0). "
1642 "It may change without warnings. "
1643 "It may change without warnings. "
1643 "Use in corresponding context manager.",
1644 "Use in corresponding context manager.",
1644 category=ProvisionalCompleterWarning, stacklevel=2)
1645 category=ProvisionalCompleterWarning, stacklevel=2)
1645
1646
1646 seen = set()
1647 seen = set()
1647 for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
1648 for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
1648 if c and (c in seen):
1649 if c and (c in seen):
1649 continue
1650 continue
1650 yield c
1651 yield c
1651 seen.add(c)
1652 seen.add(c)
1652
1653
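# A hedged usage sketch for the provisional API (assuming a
# ``provisionalcompleter`` context manager that permits the
# ProvisionalCompleterWarning above, and ``ip.Completer`` being the shell's
# IPCompleter instance; both names are assumptions for illustration):
#
#     with provisionalcompleter():
#         for c in ip.Completer.completions('foo.b', 5):
#             print(c.start, c.end, c.text, c.type)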
1653 def _completions(self, full_text: str, offset: int, *, _timeout)->Iterator[Completion]:
1654 def _completions(self, full_text: str, offset: int, *, _timeout)->Iterator[Completion]:
1654 """
1655 """
1655 Core completion method. Same signature as :any:`completions`, with the
1656 Core completion method. Same signature as :any:`completions`, with the
1656 extra ``_timeout`` parameter (in seconds).
1657 extra ``_timeout`` parameter (in seconds).
1657
1658
1658
1659
1659 Computing jedi's completion ``.type`` can be quite expensive (it is a
1660 Computing jedi's completion ``.type`` can be quite expensive (it is a
1660 lazy property) and can require some warm-up, more warm up than just
1661 lazy property) and can require some warm-up, more warm up than just
1661 computing the ``name`` of a completion. The warm-up can be:
1662 computing the ``name`` of a completion. The warm-up can be:
1662
1663
1663 - Long warm-up the first time a module is encountered after
1664 - Long warm-up the first time a module is encountered after
1664 install/update: actually build the parse/inference tree.
1665 install/update: actually build the parse/inference tree.
1665
1666
1666 - first time the module is encountered in a session: load tree from
1667 - first time the module is encountered in a session: load tree from
1667 disk.
1668 disk.
1668
1669
1669 We don't want to block completions for tens of seconds so we give the
1670 We don't want to block completions for tens of seconds so we give the
1670 completer a "budget" of ``_timeout`` seconds per invocation to compute
1671 completer a "budget" of ``_timeout`` seconds per invocation to compute
1671 completion types; the completions that have not yet been computed will
1672 completion types; the completions that have not yet been computed will
1672 be marked as "unknown" and will have a chance to be computed next round
1673 be marked as "unknown" and will have a chance to be computed next round
1673 as things get cached.
1674 as things get cached.
1674
1675
1675 Keep in mind that Jedi is not the only thing treating the completion, so
1676 Keep in mind that Jedi is not the only thing treating the completion, so
1676 keep the timeout short-ish: if we take more than 0.3 seconds we still
1677 keep the timeout short-ish: if we take more than 0.3 seconds we still
1677 have lots of processing to do.
1678 have lots of processing to do.
1678
1679
1679 """
1680 """
1680 deadline = time.monotonic() + _timeout
1681 deadline = time.monotonic() + _timeout
1681
1682
1682
1683
1683 before = full_text[:offset]
1684 before = full_text[:offset]
1684 cursor_line, cursor_column = position_to_cursor(full_text, offset)
1685 cursor_line, cursor_column = position_to_cursor(full_text, offset)
1685
1686
1686 matched_text, matches, matches_origin, jedi_matches = self._complete(
1687 matched_text, matches, matches_origin, jedi_matches = self._complete(
1687 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column)
1688 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column)
1688
1689
1689 iter_jm = iter(jedi_matches)
1690 iter_jm = iter(jedi_matches)
1690 if _timeout:
1691 if _timeout:
1691 for jm in iter_jm:
1692 for jm in iter_jm:
1692 try:
1693 try:
1693 type_ = jm.type
1694 type_ = jm.type
1694 except Exception:
1695 except Exception:
1695 if self.debug:
1696 if self.debug:
1696 print("Error in Jedi getting type of ", jm)
1697 print("Error in Jedi getting type of ", jm)
1697 type_ = None
1698 type_ = None
1698 delta = len(jm.name_with_symbols) - len(jm.complete)
1699 delta = len(jm.name_with_symbols) - len(jm.complete)
1699 yield Completion(start=offset - delta,
1700 yield Completion(start=offset - delta,
1700 end=offset,
1701 end=offset,
1701 text=jm.name_with_symbols,
1702 text=jm.name_with_symbols,
1702 type=type_,
1703 type=type_,
1703 _origin='jedi')
1704 _origin='jedi')
1704
1705
1705 if time.monotonic() > deadline:
1706 if time.monotonic() > deadline:
1706 break
1707 break
1707
1708
1708 for jm in iter_jm:
1709 for jm in iter_jm:
1709 delta = len(jm.name_with_symbols) - len(jm.complete)
1710 delta = len(jm.name_with_symbols) - len(jm.complete)
1710 yield Completion(start=offset - delta,
1711 yield Completion(start=offset - delta,
1711 end=offset,
1712 end=offset,
1712 text=jm.name_with_symbols,
1713 text=jm.name_with_symbols,
1713 type='<unknown>', # don't compute type for speed
1714 type='<unknown>', # don't compute type for speed
1714 _origin='jedi')
1715 _origin='jedi')
1715
1716
1716
1717
1717 start_offset = before.rfind(matched_text)
1718 start_offset = before.rfind(matched_text)
1718
1719
1719 # TODO:
1720 # TODO:
1720 # Suppress this; right now it is just for debug.
1721 # Suppress this; right now it is just for debug.
1721 if jedi_matches and matches and self.debug:
1722 if jedi_matches and matches and self.debug:
1722 yield Completion(start=start_offset, end=offset, text='--jedi/ipython--', _origin='debug')
1723 yield Completion(start=start_offset, end=offset, text='--jedi/ipython--', _origin='debug')
1723
1724
1724 # I'm unsure if this is always true, so let's assert and see if it
1725 # I'm unsure if this is always true, so let's assert and see if it
1725 # crashes
1726 # crashes
1726 assert before.endswith(matched_text)
1727 assert before.endswith(matched_text)
1727 for m, t in zip(matches, matches_origin):
1728 for m, t in zip(matches, matches_origin):
1728 yield Completion(start=start_offset, end=offset, text=m, _origin=t)
1729 yield Completion(start=start_offset, end=offset, text=m, _origin=t)
1729
1730
1730
1731
1731 def complete(self, text=None, line_buffer=None, cursor_pos=None):
1732 def complete(self, text=None, line_buffer=None, cursor_pos=None):
1732 """Find completions for the given text and line context.
1733 """Find completions for the given text and line context.
1733
1734
1734 Note that both the text and the line_buffer are optional, but at least
1735 Note that both the text and the line_buffer are optional, but at least
1735 one of them must be given.
1736 one of them must be given.
1736
1737
1737 Parameters
1738 Parameters
1738 ----------
1739 ----------
1739 text : string, optional
1740 text : string, optional
1740 Text to perform the completion on. If not given, the line buffer
1741 Text to perform the completion on. If not given, the line buffer
1741 is split using the instance's CompletionSplitter object.
1742 is split using the instance's CompletionSplitter object.
1742
1743
1743 line_buffer : string, optional
1744 line_buffer : string, optional
1744 If not given, the completer attempts to obtain the current line
1745 If not given, the completer attempts to obtain the current line
1745 buffer via readline. This keyword allows clients which are
1746 buffer via readline. This keyword allows clients which are
1746 requesting text completions in non-readline contexts to inform
1747 requesting text completions in non-readline contexts to inform
1747 the completer of the entire text.
1748 the completer of the entire text.
1748
1749
1749 cursor_pos : int, optional
1750 cursor_pos : int, optional
1750 Index of the cursor in the full line buffer. Should be provided by
1751 Index of the cursor in the full line buffer. Should be provided by
1751 remote frontends where kernel has no access to frontend state.
1752 remote frontends where kernel has no access to frontend state.
1752
1753
1753 Returns
1754 Returns
1754 -------
1755 -------
1755 text : str
1756 text : str
1756 Text that was actually used in the completion.
1757 Text that was actually used in the completion.
1757
1758
1758 matches : list
1759 matches : list
1759 A list of completion matches.
1760 A list of completion matches.
1760
1761
1761
1762
1762 .. note::
1763 .. note::
1763
1764
1764 This API is likely to be deprecated and replaced by
1765 This API is likely to be deprecated and replaced by
1765 :any:`IPCompleter.completions` in the future.
1766 :any:`IPCompleter.completions` in the future.
1766
1767
1767
1768
1768 """
1769 """
1769 warnings.warn('`Completer.complete` is pending deprecation since '
1770 warnings.warn('`Completer.complete` is pending deprecation since '
1770 'IPython 6.0 and will be replaced by `Completer.completions`.',
1771 'IPython 6.0 and will be replaced by `Completer.completions`.',
1771 PendingDeprecationWarning)
1772 PendingDeprecationWarning)
1772 # potential todo: fold the 3rd throwaway argument of _complete
1773 # potential todo: fold the 3rd throwaway argument of _complete
1773 # into the first two.
1774 # into the first two.
1774 return self._complete(line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0)[:2]
1775 return self._complete(line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0)[:2]
1775
1776
1776 def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
1777 def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
1777 full_text=None, return_jedi_results=True) -> (str, List[str], List[object]):
1778 full_text=None, return_jedi_results=True) -> (str, List[str], List[object]):
1778 """
1779 """
1779
1780
1780 Like complete, but can also return raw Jedi completions as well as the
1781 Like complete, but can also return raw Jedi completions as well as the
1781 origin of the completion text. This could (and should) be made much
1782 origin of the completion text. This could (and should) be made much
1782 cleaner, but that will be simpler once we drop the old (and stateful)
1783 cleaner, but that will be simpler once we drop the old (and stateful)
1783 :any:`complete` API.
1784 :any:`complete` API.
1784
1785
1785
1786
1786 With the current provisional API, ``cursor_pos`` acts, depending on the
1787 With the current provisional API, ``cursor_pos`` acts, depending on the
1787 caller, either as the offset in the ``text`` or ``line_buffer``, or as the
1788 caller, either as the offset in the ``text`` or ``line_buffer``, or as the
1788 ``column`` when passing multiline strings; it could/should be renamed,
1789 ``column`` when passing multiline strings; it could/should be renamed,
1789 but that would add extra noise.
1790 but that would add extra noise.
1790 """
1791 """
1791
1792
1792 # if the cursor position isn't given, the only sane assumption we can
1793 # if the cursor position isn't given, the only sane assumption we can
1793 # make is that it's at the end of the line (the common case)
1794 # make is that it's at the end of the line (the common case)
1794 if cursor_pos is None:
1795 if cursor_pos is None:
1795 cursor_pos = len(line_buffer) if text is None else len(text)
1796 cursor_pos = len(line_buffer) if text is None else len(text)
1796
1797
1797 if self.use_main_ns:
1798 if self.use_main_ns:
1798 self.namespace = __main__.__dict__
1799 self.namespace = __main__.__dict__
1799
1800
1800 # if text is either None or an empty string, rely on the line buffer
1801 # if text is either None or an empty string, rely on the line buffer
1801 if (not line_buffer) and full_text:
1802 if (not line_buffer) and full_text:
1802 line_buffer = full_text.split('\n')[cursor_line]
1803 line_buffer = full_text.split('\n')[cursor_line]
1803 if not text:
1804 if not text:
1804 text = self.splitter.split_line(line_buffer, cursor_pos)
1805 text = self.splitter.split_line(line_buffer, cursor_pos)
1805
1806
1806 if self.backslash_combining_completions:
1807 if self.backslash_combining_completions:
1807 # allow deactivation of these on windows.
1808 # allow deactivation of these on windows.
1808 base_text = text if not line_buffer else line_buffer[:cursor_pos]
1809 base_text = text if not line_buffer else line_buffer[:cursor_pos]
1809 latex_text, latex_matches = self.latex_matches(base_text)
1810 latex_text, latex_matches = self.latex_matches(base_text)
1810 if latex_matches:
1811 if latex_matches:
1811 return latex_text, latex_matches, ['latex_matches']*len(latex_matches), ()
1812 return latex_text, latex_matches, ['latex_matches']*len(latex_matches), ()
1812 name_text = ''
1813 name_text = ''
1813 name_matches = []
1814 name_matches = []
1814 for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
1815 for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
1815 name_text, name_matches = meth(base_text)
1816 name_text, name_matches = meth(base_text)
1816 if name_text:
1817 if name_text:
1817 return name_text, name_matches, [meth.__qualname__]*len(name_matches), {}
1818 return name_text, name_matches, [meth.__qualname__]*len(name_matches), {}
1818
1819
1819
1820
1820 # If no line buffer is given, assume the input text is all there was
1821 # If no line buffer is given, assume the input text is all there was
1821 if line_buffer is None:
1822 if line_buffer is None:
1822 line_buffer = text
1823 line_buffer = text
1823
1824
1824 self.line_buffer = line_buffer
1825 self.line_buffer = line_buffer
1825 self.text_until_cursor = self.line_buffer[:cursor_pos]
1826 self.text_until_cursor = self.line_buffer[:cursor_pos]
1826
1827
1827 # Start with a clean slate of completions
1828 # Start with a clean slate of completions
1828 matches = []
1829 matches = []
1829 custom_res = self.dispatch_custom_completer(text)
1830 custom_res = self.dispatch_custom_completer(text)
1830 # FIXME: we should extend our api to return a dict with completions for
1831 # FIXME: we should extend our api to return a dict with completions for
1831 # different types of objects. The rlcomplete() method could then
1832 # different types of objects. The rlcomplete() method could then
1832 # simply collapse the dict into a list for readline, but we'd have
1833 # simply collapse the dict into a list for readline, but we'd have
1833 # richer completion semantics in other environments.
1834 # richer completion semantics in other environments.
1834 completions = ()
1835 completions = ()
1835 if self.use_jedi and return_jedi_results:
1836 if self.use_jedi and return_jedi_results:
1836 if not full_text:
1837 if not full_text:
1837 full_text = line_buffer
1838 full_text = line_buffer
1838 completions = self._jedi_matches(
1839 completions = self._jedi_matches(
1839 cursor_pos, cursor_line, full_text)
1840 cursor_pos, cursor_line, full_text)
1840 if custom_res is not None:
1841 if custom_res is not None:
1841 # did custom completers produce something?
1842 # did custom completers produce something?
1842 matches = [(m, 'custom') for m in custom_res]
1843 matches = [(m, 'custom') for m in custom_res]
1843 else:
1844 else:
1844 # Extend the list of completions with the results of each
1845 # Extend the list of completions with the results of each
1845 # matcher, so we return results to the user from all
1846 # matcher, so we return results to the user from all
1846 # namespaces.
1847 # namespaces.
1847 if self.merge_completions:
1848 if self.merge_completions:
1848 matches = []
1849 matches = []
1849 for matcher in self.matchers:
1850 for matcher in self.matchers:
1850 try:
1851 try:
1851 matches.extend([(m, matcher.__qualname__)
1852 matches.extend([(m, matcher.__qualname__)
1852 for m in matcher(text)])
1853 for m in matcher(text)])
1853 except:
1854 except:
1854 # Show the ugly traceback if the matcher causes an
1855 # Show the ugly traceback if the matcher causes an
1855 # exception, but do NOT crash the kernel!
1856 # exception, but do NOT crash the kernel!
1856 sys.excepthook(*sys.exc_info())
1857 sys.excepthook(*sys.exc_info())
1857 else:
1858 else:
1858 for matcher in self.matchers:
1859 for matcher in self.matchers:
1859 matches = [(m, matcher.__qualname__)
1860 matches = [(m, matcher.__qualname__)
1860 for m in matcher(text)]
1861 for m in matcher(text)]
1861 if matches:
1862 if matches:
1862 break
1863 break
1863 seen = set()
1864 seen = set()
1864 filtered_matches = set()
1865 filtered_matches = set()
1865 for m in matches:
1866 for m in matches:
1866 t, c = m
1867 t, c = m
1867 if t not in seen:
1868 if t not in seen:
1868 filtered_matches.add(m)
1869 filtered_matches.add(m)
1869 seen.add(t)
1870 seen.add(t)
1870
1871
1871 filtered_matches = sorted(
1872 filtered_matches = sorted(
1872 set(filtered_matches), key=lambda x: completions_sorting_key(x[0]))
1873 set(filtered_matches), key=lambda x: completions_sorting_key(x[0]))
1873
1874
1874 matches = [m[0] for m in filtered_matches]
1875 matches = [m[0] for m in filtered_matches]
1875 origins = [m[1] for m in filtered_matches]
1876 origins = [m[1] for m in filtered_matches]
1876
1877
1877 self.matches = matches
1878 self.matches = matches
1878
1879
1879 return text, matches, origins, completions
1880 return text, matches, origins, completions
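As noted in the docstring above, this stateful API is expected to be superseded by :any:`IPCompleter.completions`; the test file below exercises it through the provisional-API guard. A hedged sketch of that newer pattern (the code string and offsets are illustrative only):

    from IPython import get_ipython
    from IPython.core.completer import provisionalcompleter

    ip = get_ipython()
    code = 'import collections; collections.na'
    with provisionalcompleter():
        # yields Completion objects carrying start/end offsets and replacement text
        for comp in ip.Completer.completions(code, len(code)):
            print(comp.start, comp.end, comp.text)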
@@ -1,860 +1,891 b''
1 # encoding: utf-8
1 # encoding: utf-8
2 """Tests for the IPython tab-completion machinery."""
2 """Tests for the IPython tab-completion machinery."""
3
3
4 # Copyright (c) IPython Development Team.
4 # Copyright (c) IPython Development Team.
5 # Distributed under the terms of the Modified BSD License.
5 # Distributed under the terms of the Modified BSD License.
6
6
7 import os
7 import os
8 import sys
8 import sys
9 import textwrap
9 import textwrap
10 import unittest
10 import unittest
11
11
12 from contextlib import contextmanager
12 from contextlib import contextmanager
13
13
14 import nose.tools as nt
14 import nose.tools as nt
15
15
16 from traitlets.config.loader import Config
16 from traitlets.config.loader import Config
17 from IPython import get_ipython
17 from IPython import get_ipython
18 from IPython.core import completer
18 from IPython.core import completer
19 from IPython.external.decorators import knownfailureif
19 from IPython.external.decorators import knownfailureif
20 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
20 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
21 from IPython.utils.generics import complete_object
21 from IPython.utils.generics import complete_object
22 from IPython.testing import decorators as dec
22 from IPython.testing import decorators as dec
23
23
24 from IPython.core.completer import (
24 from IPython.core.completer import (
25 Completion, provisionalcompleter, match_dict_keys, _deduplicate_completions)
25 Completion, provisionalcompleter, match_dict_keys, _deduplicate_completions)
26 from nose.tools import assert_in, assert_not_in
26 from nose.tools import assert_in, assert_not_in
27
27
28 #-----------------------------------------------------------------------------
28 #-----------------------------------------------------------------------------
29 # Test functions
29 # Test functions
30 #-----------------------------------------------------------------------------
30 #-----------------------------------------------------------------------------
31
31
32 @contextmanager
32 @contextmanager
33 def greedy_completion():
33 def greedy_completion():
34 ip = get_ipython()
34 ip = get_ipython()
35 greedy_original = ip.Completer.greedy
35 greedy_original = ip.Completer.greedy
36 try:
36 try:
37 ip.Completer.greedy = True
37 ip.Completer.greedy = True
38 yield
38 yield
39 finally:
39 finally:
40 ip.Completer.greedy = greedy_original
40 ip.Completer.greedy = greedy_original
41
41
42 def test_protect_filename():
42 def test_protect_filename():
43 if sys.platform == 'win32':
43 if sys.platform == 'win32':
44 pairs = [('abc','abc'),
44 pairs = [('abc','abc'),
45 (' abc','" abc"'),
45 (' abc','" abc"'),
46 ('a bc','"a bc"'),
46 ('a bc','"a bc"'),
47 ('a bc','"a bc"'),
47 ('a bc','"a bc"'),
48 (' bc','" bc"'),
48 (' bc','" bc"'),
49 ]
49 ]
50 else:
50 else:
51 pairs = [('abc','abc'),
51 pairs = [('abc','abc'),
52 (' abc',r'\ abc'),
52 (' abc',r'\ abc'),
53 ('a bc',r'a\ bc'),
53 ('a bc',r'a\ bc'),
54 ('a bc',r'a\ \ bc'),
54 ('a bc',r'a\ \ bc'),
55 (' bc',r'\ \ bc'),
55 (' bc',r'\ \ bc'),
56 # On posix, we also protect parens and other special characters.
56 # On posix, we also protect parens and other special characters.
57 ('a(bc',r'a\(bc'),
57 ('a(bc',r'a\(bc'),
58 ('a)bc',r'a\)bc'),
58 ('a)bc',r'a\)bc'),
59 ('a( )bc',r'a\(\ \)bc'),
59 ('a( )bc',r'a\(\ \)bc'),
60 ('a[1]bc', r'a\[1\]bc'),
60 ('a[1]bc', r'a\[1\]bc'),
61 ('a{1}bc', r'a\{1\}bc'),
61 ('a{1}bc', r'a\{1\}bc'),
62 ('a#bc', r'a\#bc'),
62 ('a#bc', r'a\#bc'),
63 ('a?bc', r'a\?bc'),
63 ('a?bc', r'a\?bc'),
64 ('a=bc', r'a\=bc'),
64 ('a=bc', r'a\=bc'),
65 ('a\\bc', r'a\\bc'),
65 ('a\\bc', r'a\\bc'),
66 ('a|bc', r'a\|bc'),
66 ('a|bc', r'a\|bc'),
67 ('a;bc', r'a\;bc'),
67 ('a;bc', r'a\;bc'),
68 ('a:bc', r'a\:bc'),
68 ('a:bc', r'a\:bc'),
69 ("a'bc", r"a\'bc"),
69 ("a'bc", r"a\'bc"),
70 ('a*bc', r'a\*bc'),
70 ('a*bc', r'a\*bc'),
71 ('a"bc', r'a\"bc'),
71 ('a"bc', r'a\"bc'),
72 ('a^bc', r'a\^bc'),
72 ('a^bc', r'a\^bc'),
73 ('a&bc', r'a\&bc'),
73 ('a&bc', r'a\&bc'),
74 ]
74 ]
75 # run the actual tests
75 # run the actual tests
76 for s1, s2 in pairs:
76 for s1, s2 in pairs:
77 s1p = completer.protect_filename(s1)
77 s1p = completer.protect_filename(s1)
78 nt.assert_equal(s1p, s2)
78 nt.assert_equal(s1p, s2)
79
79
80
80
81 def check_line_split(splitter, test_specs):
81 def check_line_split(splitter, test_specs):
82 for part1, part2, split in test_specs:
82 for part1, part2, split in test_specs:
83 cursor_pos = len(part1)
83 cursor_pos = len(part1)
84 line = part1+part2
84 line = part1+part2
85 out = splitter.split_line(line, cursor_pos)
85 out = splitter.split_line(line, cursor_pos)
86 nt.assert_equal(out, split)
86 nt.assert_equal(out, split)
87
87
88
88
89 def test_line_split():
89 def test_line_split():
90 """Basic line splitter test with default specs."""
90 """Basic line splitter test with default specs."""
91 sp = completer.CompletionSplitter()
91 sp = completer.CompletionSplitter()
92 # The format of the test specs is: part1, part2, expected answer. Parts 1
92 # The format of the test specs is: part1, part2, expected answer. Parts 1
93 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
93 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
94 # was at the end of part1. So an empty part2 represents someone hitting
94 # was at the end of part1. So an empty part2 represents someone hitting
95 # tab at the end of the line, the most common case.
95 # tab at the end of the line, the most common case.
96 t = [('run some/scrip', '', 'some/scrip'),
96 t = [('run some/scrip', '', 'some/scrip'),
97 ('run scripts/er', 'ror.py foo', 'scripts/er'),
97 ('run scripts/er', 'ror.py foo', 'scripts/er'),
98 ('echo $HOM', '', 'HOM'),
98 ('echo $HOM', '', 'HOM'),
99 ('print sys.pa', '', 'sys.pa'),
99 ('print sys.pa', '', 'sys.pa'),
100 ('print(sys.pa', '', 'sys.pa'),
100 ('print(sys.pa', '', 'sys.pa'),
101 ("execfile('scripts/er", '', 'scripts/er'),
101 ("execfile('scripts/er", '', 'scripts/er'),
102 ('a[x.', '', 'x.'),
102 ('a[x.', '', 'x.'),
103 ('a[x.', 'y', 'x.'),
103 ('a[x.', 'y', 'x.'),
104 ('cd "some_file/', '', 'some_file/'),
104 ('cd "some_file/', '', 'some_file/'),
105 ]
105 ]
106 check_line_split(sp, t)
106 check_line_split(sp, t)
107 # Ensure splitting works OK with unicode by re-running the tests with
107 # Ensure splitting works OK with unicode by re-running the tests with
108 # all inputs turned into unicode
108 # all inputs turned into unicode
109 check_line_split(sp, [ map(str, p) for p in t] )
109 check_line_split(sp, [ map(str, p) for p in t] )
110
110
111
111
112 def test_custom_completion_error():
112 def test_custom_completion_error():
113 """Test that errors from custom attribute completers are silenced."""
113 """Test that errors from custom attribute completers are silenced."""
114 ip = get_ipython()
114 ip = get_ipython()
115 class A(object): pass
115 class A(object): pass
116 ip.user_ns['a'] = A()
116 ip.user_ns['a'] = A()
117
117
118 @complete_object.when_type(A)
118 @complete_object.when_type(A)
119 def complete_A(a, existing_completions):
119 def complete_A(a, existing_completions):
120 raise TypeError("this should be silenced")
120 raise TypeError("this should be silenced")
121
121
122 ip.complete("a.")
122 ip.complete("a.")
123
123
124
124
125 def test_unicode_completions():
125 def test_unicode_completions():
126 ip = get_ipython()
126 ip = get_ipython()
127 # Some strings that trigger different types of completion. Check them both
127 # Some strings that trigger different types of completion. Check them both
128 # in str and unicode forms
128 # in str and unicode forms
129 s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
129 s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
130 for t in s + list(map(str, s)):
130 for t in s + list(map(str, s)):
131 # We don't need to check exact completion values (they may change
131 # We don't need to check exact completion values (they may change
132 # depending on the state of the namespace), but at least no exceptions
133 # depending on the state of the namespace), but at least no exceptions
133 # should be thrown and the return value should be a pair of text, list
133 # should be thrown and the return value should be a pair of text, list
134 # values.
134 # values.
135 text, matches = ip.complete(t)
135 text, matches = ip.complete(t)
136 nt.assert_true(isinstance(text, str))
136 nt.assert_true(isinstance(text, str))
137 nt.assert_true(isinstance(matches, list))
137 nt.assert_true(isinstance(matches, list))
138
138
139 def test_latex_completions():
139 def test_latex_completions():
140 from IPython.core.latex_symbols import latex_symbols
140 from IPython.core.latex_symbols import latex_symbols
141 import random
141 import random
142 ip = get_ipython()
142 ip = get_ipython()
143 # Test some random unicode symbols
143 # Test some random unicode symbols
144 keys = random.sample(latex_symbols.keys(), 10)
144 keys = random.sample(latex_symbols.keys(), 10)
145 for k in keys:
145 for k in keys:
146 text, matches = ip.complete(k)
146 text, matches = ip.complete(k)
147 nt.assert_equal(len(matches),1)
147 nt.assert_equal(len(matches),1)
148 nt.assert_equal(text, k)
148 nt.assert_equal(text, k)
149 nt.assert_equal(matches[0], latex_symbols[k])
149 nt.assert_equal(matches[0], latex_symbols[k])
150 # Test a more complex line
150 # Test a more complex line
151 text, matches = ip.complete(u'print(\\alpha')
151 text, matches = ip.complete(u'print(\\alpha')
152 nt.assert_equal(text, u'\\alpha')
152 nt.assert_equal(text, u'\\alpha')
153 nt.assert_equal(matches[0], latex_symbols['\\alpha'])
153 nt.assert_equal(matches[0], latex_symbols['\\alpha'])
154 # Test multiple matching latex symbols
154 # Test multiple matching latex symbols
155 text, matches = ip.complete(u'\\al')
155 text, matches = ip.complete(u'\\al')
156 nt.assert_in('\\alpha', matches)
156 nt.assert_in('\\alpha', matches)
157 nt.assert_in('\\aleph', matches)
157 nt.assert_in('\\aleph', matches)
158
158
159
159
160
160
161
161
162 def test_back_latex_completion():
162 def test_back_latex_completion():
163 ip = get_ipython()
163 ip = get_ipython()
164
164
165 # do not return more than one match for \beta, only the latex one.
165 # do not return more than one match for \beta, only the latex one.
166 name, matches = ip.complete('\\β')
166 name, matches = ip.complete('\\β')
167 nt.assert_equal(len(matches), 1)
167 nt.assert_equal(len(matches), 1)
168 nt.assert_equal(matches[0], '\\beta')
168 nt.assert_equal(matches[0], '\\beta')
169
169
170 def test_back_unicode_completion():
170 def test_back_unicode_completion():
171 ip = get_ipython()
171 ip = get_ipython()
172
172
173 name, matches = ip.complete('\\Ⅴ')
173 name, matches = ip.complete('\\Ⅴ')
174 nt.assert_equal(len(matches), 1)
174 nt.assert_equal(len(matches), 1)
175 nt.assert_equal(matches[0], '\\ROMAN NUMERAL FIVE')
175 nt.assert_equal(matches[0], '\\ROMAN NUMERAL FIVE')
176
176
177
177
178 def test_forward_unicode_completion():
178 def test_forward_unicode_completion():
179 ip = get_ipython()
179 ip = get_ipython()
180
180
181 name, matches = ip.complete('\\ROMAN NUMERAL FIVE')
181 name, matches = ip.complete('\\ROMAN NUMERAL FIVE')
182 nt.assert_equal(len(matches), 1)
182 nt.assert_equal(len(matches), 1)
183 nt.assert_equal(matches[0], 'Ⅴ')
183 nt.assert_equal(matches[0], 'Ⅴ')
184
184
185 @dec.knownfailureif(sys.platform == 'win32', 'Fails if there is a C:\\j... path')
185 @dec.knownfailureif(sys.platform == 'win32', 'Fails if there is a C:\\j... path')
186 def test_no_ascii_back_completion():
186 def test_no_ascii_back_completion():
187 ip = get_ipython()
187 ip = get_ipython()
188 with TemporaryWorkingDirectory(): # Avoid any filename completions
188 with TemporaryWorkingDirectory(): # Avoid any filename completions
189 # single ascii letters that don't yet have completions
189 # single ascii letters that don't yet have completions
190 for letter in 'jJ' :
190 for letter in 'jJ' :
191 name, matches = ip.complete('\\'+letter)
191 name, matches = ip.complete('\\'+letter)
192 nt.assert_equal(matches, [])
192 nt.assert_equal(matches, [])
193
193
194
194
195
195
196
196
197 class CompletionSplitterTestCase(unittest.TestCase):
197 class CompletionSplitterTestCase(unittest.TestCase):
198 def setUp(self):
198 def setUp(self):
199 self.sp = completer.CompletionSplitter()
199 self.sp = completer.CompletionSplitter()
200
200
201 def test_delim_setting(self):
201 def test_delim_setting(self):
202 self.sp.delims = ' '
202 self.sp.delims = ' '
203 nt.assert_equal(self.sp.delims, ' ')
203 nt.assert_equal(self.sp.delims, ' ')
204 nt.assert_equal(self.sp._delim_expr, '[\ ]')
204 nt.assert_equal(self.sp._delim_expr, '[\ ]')
205
205
206 def test_spaces(self):
206 def test_spaces(self):
207 """Test with only spaces as split chars."""
207 """Test with only spaces as split chars."""
208 self.sp.delims = ' '
208 self.sp.delims = ' '
209 t = [('foo', '', 'foo'),
209 t = [('foo', '', 'foo'),
210 ('run foo', '', 'foo'),
210 ('run foo', '', 'foo'),
211 ('run foo', 'bar', 'foo'),
211 ('run foo', 'bar', 'foo'),
212 ]
212 ]
213 check_line_split(self.sp, t)
213 check_line_split(self.sp, t)
214
214
215
215
216 def test_has_open_quotes1():
216 def test_has_open_quotes1():
217 for s in ["'", "'''", "'hi' '"]:
217 for s in ["'", "'''", "'hi' '"]:
218 nt.assert_equal(completer.has_open_quotes(s), "'")
218 nt.assert_equal(completer.has_open_quotes(s), "'")
219
219
220
220
221 def test_has_open_quotes2():
221 def test_has_open_quotes2():
222 for s in ['"', '"""', '"hi" "']:
222 for s in ['"', '"""', '"hi" "']:
223 nt.assert_equal(completer.has_open_quotes(s), '"')
223 nt.assert_equal(completer.has_open_quotes(s), '"')
224
224
225
225
226 def test_has_open_quotes3():
226 def test_has_open_quotes3():
227 for s in ["''", "''' '''", "'hi' 'ipython'"]:
227 for s in ["''", "''' '''", "'hi' 'ipython'"]:
228 nt.assert_false(completer.has_open_quotes(s))
228 nt.assert_false(completer.has_open_quotes(s))
229
229
230
230
231 def test_has_open_quotes4():
231 def test_has_open_quotes4():
232 for s in ['""', '""" """', '"hi" "ipython"']:
232 for s in ['""', '""" """', '"hi" "ipython"']:
233 nt.assert_false(completer.has_open_quotes(s))
233 nt.assert_false(completer.has_open_quotes(s))
234
234
235
235
236 @knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
236 @knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
237 def test_abspath_file_completions():
237 def test_abspath_file_completions():
238 ip = get_ipython()
238 ip = get_ipython()
239 with TemporaryDirectory() as tmpdir:
239 with TemporaryDirectory() as tmpdir:
240 prefix = os.path.join(tmpdir, 'foo')
240 prefix = os.path.join(tmpdir, 'foo')
241 suffixes = ['1', '2']
241 suffixes = ['1', '2']
242 names = [prefix+s for s in suffixes]
242 names = [prefix+s for s in suffixes]
243 for n in names:
243 for n in names:
244 open(n, 'w').close()
244 open(n, 'w').close()
245
245
246 # Check simple completion
246 # Check simple completion
247 c = ip.complete(prefix)[1]
247 c = ip.complete(prefix)[1]
248 nt.assert_equal(c, names)
248 nt.assert_equal(c, names)
249
249
250 # Now check with a function call
250 # Now check with a function call
251 cmd = 'a = f("%s' % prefix
251 cmd = 'a = f("%s' % prefix
252 c = ip.complete(prefix, cmd)[1]
252 c = ip.complete(prefix, cmd)[1]
253 comp = [prefix+s for s in suffixes]
253 comp = [prefix+s for s in suffixes]
254 nt.assert_equal(c, comp)
254 nt.assert_equal(c, comp)
255
255
256
256
257 def test_local_file_completions():
257 def test_local_file_completions():
258 ip = get_ipython()
258 ip = get_ipython()
259 with TemporaryWorkingDirectory():
259 with TemporaryWorkingDirectory():
260 prefix = './foo'
260 prefix = './foo'
261 suffixes = ['1', '2']
261 suffixes = ['1', '2']
262 names = [prefix+s for s in suffixes]
262 names = [prefix+s for s in suffixes]
263 for n in names:
263 for n in names:
264 open(n, 'w').close()
264 open(n, 'w').close()
265
265
266 # Check simple completion
266 # Check simple completion
267 c = ip.complete(prefix)[1]
267 c = ip.complete(prefix)[1]
268 nt.assert_equal(c, names)
268 nt.assert_equal(c, names)
269
269
270 # Now check with a function call
270 # Now check with a function call
271 cmd = 'a = f("%s' % prefix
271 cmd = 'a = f("%s' % prefix
272 c = ip.complete(prefix, cmd)[1]
272 c = ip.complete(prefix, cmd)[1]
273 comp = set(prefix+s for s in suffixes)
273 comp = set(prefix+s for s in suffixes)
274 nt.assert_true(comp.issubset(set(c)))
274 nt.assert_true(comp.issubset(set(c)))
275
275
276
276
277 def test_quoted_file_completions():
278 ip = get_ipython()
279 with TemporaryWorkingDirectory():
280 name = "foo'bar"
281 open(name, 'w').close()
282
283 # Don't escape on Windows
284 escaped = name if sys.platform == "win32" else "foo\\'bar"
285
286 # Single quote matches embedded single quote
287 text = "open('foo"
288 c = ip.Completer._complete(cursor_line=0,
289 cursor_pos=len(text),
290 full_text=text)[1]
291 nt.assert_equal(c, [escaped])
292
293 # Double quote requires no escape
294 text = 'open("foo'
295 c = ip.Completer._complete(cursor_line=0,
296 cursor_pos=len(text),
297 full_text=text)[1]
298 nt.assert_equal(c, [name])
299
300 # No quote requires an escape
301 text = '%ls foo'
302 c = ip.Completer._complete(cursor_line=0,
303 cursor_pos=len(text),
304 full_text=text)[1]
305 nt.assert_equal(c, [escaped])
306
307
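For reference, a small sketch of the user-visible behaviour the new test above asserts, assuming a file named foo'bar in the working directory on a non-Windows platform:

    # open('foo<TAB>   ->  open('foo\'bar    (embedded quote escaped inside single quotes)
    # open("foo<TAB>   ->  open("foo'bar     (no escape needed inside double quotes)
    # %ls foo<TAB>     ->  %ls foo\'bar      (unquoted text also gets the escape)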
277 def test_jedi():
308 def test_jedi():
278 """
309 """
279 A couple of issues we had with Jedi
310 A couple of issues we had with Jedi
280 """
311 """
281 ip = get_ipython()
312 ip = get_ipython()
282
313
283 def _test_complete(reason, s, comp, start=None, end=None):
314 def _test_complete(reason, s, comp, start=None, end=None):
284 l = len(s)
315 l = len(s)
285 start = start if start is not None else l
316 start = start if start is not None else l
286 end = end if end is not None else l
317 end = end if end is not None else l
287 with provisionalcompleter():
318 with provisionalcompleter():
288 completions = set(ip.Completer.completions(s, l))
319 completions = set(ip.Completer.completions(s, l))
289 assert_in(Completion(start, end, comp), completions, reason)
320 assert_in(Completion(start, end, comp), completions, reason)
290
321
291 def _test_not_complete(reason, s, comp):
322 def _test_not_complete(reason, s, comp):
292 l = len(s)
323 l = len(s)
293 with provisionalcompleter():
324 with provisionalcompleter():
294 completions = set(ip.Completer.completions(s, l))
325 completions = set(ip.Completer.completions(s, l))
295 assert_not_in(Completion(l, l, comp), completions, reason)
326 assert_not_in(Completion(l, l, comp), completions, reason)
296
327
297 import jedi
328 import jedi
298 jedi_version = tuple(int(i) for i in jedi.__version__.split('.')[:3])
329 jedi_version = tuple(int(i) for i in jedi.__version__.split('.')[:3])
299 if jedi_version > (0, 10):
330 if jedi_version > (0, 10):
300 yield _test_complete, 'jedi >0.9 should complete and not crash', 'a=1;a.', 'real'
331 yield _test_complete, 'jedi >0.9 should complete and not crash', 'a=1;a.', 'real'
301 yield _test_complete, 'can infer first argument', 'a=(1,"foo");a[0].', 'real'
332 yield _test_complete, 'can infer first argument', 'a=(1,"foo");a[0].', 'real'
302 yield _test_complete, 'can infer second argument', 'a=(1,"foo");a[1].', 'capitalize'
333 yield _test_complete, 'can infer second argument', 'a=(1,"foo");a[1].', 'capitalize'
303 yield _test_complete, 'cover duplicate completions', 'im', 'import', 0, 2
334 yield _test_complete, 'cover duplicate completions', 'im', 'import', 0, 2
304
335
305 yield _test_not_complete, 'does not mix types', 'a=(1,"foo");a[0].', 'capitalize'
336 yield _test_not_complete, 'does not mix types', 'a=(1,"foo");a[0].', 'capitalize'
306
337
307 def test_deduplicate_completions():
338 def test_deduplicate_completions():
308 """
339 """
309 Test that completions are correctly deduplicated (even if ranges are not the same)
340 Test that completions are correctly deduplicated (even if ranges are not the same)
310 """
341 """
311 ip = get_ipython()
342 ip = get_ipython()
312 ip.ex(textwrap.dedent('''
343 ip.ex(textwrap.dedent('''
313 class Z:
344 class Z:
314 zoo = 1
345 zoo = 1
315 '''))
346 '''))
316 with provisionalcompleter():
347 with provisionalcompleter():
317 l = list(_deduplicate_completions('Z.z', ip.Completer.completions('Z.z', 3)))
348 l = list(_deduplicate_completions('Z.z', ip.Completer.completions('Z.z', 3)))
318
349
319 assert len(l) == 1, 'Completions (Z.z<tab>) correctly deduplicate: %s ' % l
350 assert len(l) == 1, 'Completions (Z.z<tab>) correctly deduplicate: %s ' % l
320 assert l[0].text == 'zoo' # and not `it.accumulate`
351 assert l[0].text == 'zoo' # and not `it.accumulate`
321
352
322
353
323 def test_greedy_completions():
354 def test_greedy_completions():
324 """
355 """
325 Test the capability of the Greedy completer.
356 Test the capability of the Greedy completer.
326
357
327 Most of the tests here do not really show off the greedy completer; as proof,
358 Most of the tests here do not really show off the greedy completer; as proof,
328 each of the texts below now passes with Jedi. The greedy completer is capable of more.
359 each of the texts below now passes with Jedi. The greedy completer is capable of more.
329
360
330 See the :any:`test_dict_key_completion_contexts`
361 See the :any:`test_dict_key_completion_contexts`
331
362
332 """
363 """
333 ip = get_ipython()
364 ip = get_ipython()
334 ip.ex('a=list(range(5))')
365 ip.ex('a=list(range(5))')
335 _,c = ip.complete('.',line='a[0].')
366 _,c = ip.complete('.',line='a[0].')
336 nt.assert_false('.real' in c,
367 nt.assert_false('.real' in c,
337 "Shouldn't have completed on a[0]: %s"%c)
368 "Shouldn't have completed on a[0]: %s"%c)
338 with greedy_completion(), provisionalcompleter():
369 with greedy_completion(), provisionalcompleter():
339 def _(line, cursor_pos, expect, message, completion):
370 def _(line, cursor_pos, expect, message, completion):
340 _,c = ip.complete('.', line=line, cursor_pos=cursor_pos)
371 _,c = ip.complete('.', line=line, cursor_pos=cursor_pos)
341 with provisionalcompleter():
372 with provisionalcompleter():
342 completions = ip.Completer.completions(line, cursor_pos)
373 completions = ip.Completer.completions(line, cursor_pos)
343 nt.assert_in(expect, c, message%c)
374 nt.assert_in(expect, c, message%c)
344 nt.assert_in(completion, completions)
375 nt.assert_in(completion, completions)
345
376
346 yield _, 'a[0].', 5, 'a[0].real', "Should have completed on a[0].: %s", Completion(5,5, 'real')
377 yield _, 'a[0].', 5, 'a[0].real', "Should have completed on a[0].: %s", Completion(5,5, 'real')
347 yield _, 'a[0].r', 6, 'a[0].real', "Should have completed on a[0].r: %s", Completion(5,6, 'real')
378 yield _, 'a[0].r', 6, 'a[0].real', "Should have completed on a[0].r: %s", Completion(5,6, 'real')
348
379
349 if sys.version_info > (3, 4):
380 if sys.version_info > (3, 4):
350 yield _, 'a[0].from_', 10, 'a[0].from_bytes', "Should have completed on a[0].from_: %s", Completion(5, 10, 'from_bytes')
381 yield _, 'a[0].from_', 10, 'a[0].from_bytes', "Should have completed on a[0].from_: %s", Completion(5, 10, 'from_bytes')
351
382
352
383
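A brief interactive illustration of what the greedy flag changes, using the helpers defined in this file (results depend on the namespace, so treat the comments as indicative):

    ip = get_ipython()
    ip.ex('a = list(range(5))')
    ip.complete('.', line='a[0].')[1]                     # default: 'a[0].real' is not offered
    with greedy_completion():
        ip.complete('.', line='a[0].', cursor_pos=5)[1]   # greedy: includes 'a[0].real'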
353 def test_omit__names():
384 def test_omit__names():
354 # also happens to test IPCompleter as a configurable
385 # also happens to test IPCompleter as a configurable
355 ip = get_ipython()
386 ip = get_ipython()
356 ip._hidden_attr = 1
387 ip._hidden_attr = 1
357 ip._x = {}
388 ip._x = {}
358 c = ip.Completer
389 c = ip.Completer
359 ip.ex('ip=get_ipython()')
390 ip.ex('ip=get_ipython()')
360 cfg = Config()
391 cfg = Config()
361 cfg.IPCompleter.omit__names = 0
392 cfg.IPCompleter.omit__names = 0
362 c.update_config(cfg)
393 c.update_config(cfg)
363 with provisionalcompleter():
394 with provisionalcompleter():
364 s,matches = c.complete('ip.')
395 s,matches = c.complete('ip.')
365 completions = set(c.completions('ip.', 3))
396 completions = set(c.completions('ip.', 3))
366
397
367 nt.assert_in('ip.__str__', matches)
398 nt.assert_in('ip.__str__', matches)
368 nt.assert_in(Completion(3, 3, '__str__'), completions)
399 nt.assert_in(Completion(3, 3, '__str__'), completions)
369
400
370 nt.assert_in('ip._hidden_attr', matches)
401 nt.assert_in('ip._hidden_attr', matches)
371 nt.assert_in(Completion(3,3, "_hidden_attr"), completions)
402 nt.assert_in(Completion(3,3, "_hidden_attr"), completions)
372
403
373
404
374 cfg = Config()
405 cfg = Config()
375 cfg.IPCompleter.omit__names = 1
406 cfg.IPCompleter.omit__names = 1
376 c.update_config(cfg)
407 c.update_config(cfg)
377 with provisionalcompleter():
408 with provisionalcompleter():
378 s,matches = c.complete('ip.')
409 s,matches = c.complete('ip.')
379 completions = set(c.completions('ip.', 3))
410 completions = set(c.completions('ip.', 3))
380
411
381 nt.assert_not_in('ip.__str__', matches)
412 nt.assert_not_in('ip.__str__', matches)
382 nt.assert_not_in(Completion(3,3,'__str__'), completions)
413 nt.assert_not_in(Completion(3,3,'__str__'), completions)
383
414
384 # nt.assert_in('ip._hidden_attr', matches)
415 # nt.assert_in('ip._hidden_attr', matches)
385 nt.assert_in(Completion(3,3, "_hidden_attr"), completions)
416 nt.assert_in(Completion(3,3, "_hidden_attr"), completions)
386
417
387 cfg = Config()
418 cfg = Config()
388 cfg.IPCompleter.omit__names = 2
419 cfg.IPCompleter.omit__names = 2
389 c.update_config(cfg)
420 c.update_config(cfg)
390 with provisionalcompleter():
421 with provisionalcompleter():
391 s,matches = c.complete('ip.')
422 s,matches = c.complete('ip.')
392 completions = set(c.completions('ip.', 3))
423 completions = set(c.completions('ip.', 3))
393
424
394 nt.assert_not_in('ip.__str__', matches)
425 nt.assert_not_in('ip.__str__', matches)
395 nt.assert_not_in(Completion(3,3,'__str__'), completions)
426 nt.assert_not_in(Completion(3,3,'__str__'), completions)
396
427
397 nt.assert_not_in('ip._hidden_attr', matches)
428 nt.assert_not_in('ip._hidden_attr', matches)
398 nt.assert_not_in(Completion(3,3, "_hidden_attr"), completions)
429 nt.assert_not_in(Completion(3,3, "_hidden_attr"), completions)
399
430
400 with provisionalcompleter():
431 with provisionalcompleter():
401 s,matches = c.complete('ip._x.')
432 s,matches = c.complete('ip._x.')
402 completions = set(c.completions('ip._x.', 6))
433 completions = set(c.completions('ip._x.', 6))
403
434
404 nt.assert_in('ip._x.keys', matches)
435 nt.assert_in('ip._x.keys', matches)
405 nt.assert_in(Completion(6,6, "keys"), completions)
436 nt.assert_in(Completion(6,6, "keys"), completions)
406
437
407 del ip._hidden_attr
438 del ip._hidden_attr
408 del ip._x
439 del ip._x
409
440
410
441
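The three omit__names levels exercised above correspond to a user-facing traitlet; a hedged configuration sketch, e.g. in a profile's ipython_config.py:

    c = get_config()
    # 0: offer both __dunder__ and _single-underscore names
    # 1: hide __dunder__ names; 2: hide every name starting with an underscore
    c.IPCompleter.omit__names = 2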
411 def test_limit_to__all__False_ok():
442 def test_limit_to__all__False_ok():
412 """
443 """
413 limit_to__all__ is deprecated; once we remove it, this test can go away.
444 limit_to__all__ is deprecated; once we remove it, this test can go away.
414 """
445 """
415 ip = get_ipython()
446 ip = get_ipython()
416 c = ip.Completer
447 c = ip.Completer
417 ip.ex('class D: x=24')
448 ip.ex('class D: x=24')
418 ip.ex('d=D()')
449 ip.ex('d=D()')
419 cfg = Config()
450 cfg = Config()
420 cfg.IPCompleter.limit_to__all__ = False
451 cfg.IPCompleter.limit_to__all__ = False
421 c.update_config(cfg)
452 c.update_config(cfg)
422 s, matches = c.complete('d.')
453 s, matches = c.complete('d.')
423 nt.assert_in('d.x', matches)
454 nt.assert_in('d.x', matches)
424
455
425
456
426 def test_get__all__entries_ok():
457 def test_get__all__entries_ok():
427 class A(object):
458 class A(object):
428 __all__ = ['x', 1]
459 __all__ = ['x', 1]
429 words = completer.get__all__entries(A())
460 words = completer.get__all__entries(A())
430 nt.assert_equal(words, ['x'])
461 nt.assert_equal(words, ['x'])
431
462
432
463
433 def test_get__all__entries_no__all__ok():
464 def test_get__all__entries_no__all__ok():
434 class A(object):
465 class A(object):
435 pass
466 pass
436 words = completer.get__all__entries(A())
467 words = completer.get__all__entries(A())
437 nt.assert_equal(words, [])
468 nt.assert_equal(words, [])
438
469
439
470
440 def test_func_kw_completions():
471 def test_func_kw_completions():
441 ip = get_ipython()
472 ip = get_ipython()
442 c = ip.Completer
473 c = ip.Completer
443 ip.ex('def myfunc(a=1,b=2): return a+b')
474 ip.ex('def myfunc(a=1,b=2): return a+b')
444 s, matches = c.complete(None, 'myfunc(1,b')
475 s, matches = c.complete(None, 'myfunc(1,b')
445 nt.assert_in('b=', matches)
476 nt.assert_in('b=', matches)
446 # Simulate completing with cursor right after b (pos==10):
477 # Simulate completing with cursor right after b (pos==10):
447 s, matches = c.complete(None, 'myfunc(1,b)', 10)
478 s, matches = c.complete(None, 'myfunc(1,b)', 10)
448 nt.assert_in('b=', matches)
479 nt.assert_in('b=', matches)
449 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
480 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
450 nt.assert_in('b=', matches)
481 nt.assert_in('b=', matches)
451 #builtin function
482 #builtin function
452 s, matches = c.complete(None, 'min(k, k')
483 s, matches = c.complete(None, 'min(k, k')
453 nt.assert_in('key=', matches)
484 nt.assert_in('key=', matches)
454
485
455
486
456 def test_default_arguments_from_docstring():
487 def test_default_arguments_from_docstring():
457 ip = get_ipython()
488 ip = get_ipython()
458 c = ip.Completer
489 c = ip.Completer
459 kwd = c._default_arguments_from_docstring(
490 kwd = c._default_arguments_from_docstring(
460 'min(iterable[, key=func]) -> value')
491 'min(iterable[, key=func]) -> value')
461 nt.assert_equal(kwd, ['key'])
492 nt.assert_equal(kwd, ['key'])
462 #with cython type etc
493 #with cython type etc
463 kwd = c._default_arguments_from_docstring(
494 kwd = c._default_arguments_from_docstring(
464 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
495 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
465 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
496 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
466 #white spaces
497 #white spaces
467 kwd = c._default_arguments_from_docstring(
498 kwd = c._default_arguments_from_docstring(
468 '\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
499 '\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
469 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
500 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
470
501
471 def test_line_magics():
502 def test_line_magics():
472 ip = get_ipython()
503 ip = get_ipython()
473 c = ip.Completer
504 c = ip.Completer
474 s, matches = c.complete(None, 'lsmag')
505 s, matches = c.complete(None, 'lsmag')
475 nt.assert_in('%lsmagic', matches)
506 nt.assert_in('%lsmagic', matches)
476 s, matches = c.complete(None, '%lsmag')
507 s, matches = c.complete(None, '%lsmag')
477 nt.assert_in('%lsmagic', matches)
508 nt.assert_in('%lsmagic', matches)
478
509
479
510
480 def test_cell_magics():
511 def test_cell_magics():
481 from IPython.core.magic import register_cell_magic
512 from IPython.core.magic import register_cell_magic
482
513
483 @register_cell_magic
514 @register_cell_magic
484 def _foo_cellm(line, cell):
515 def _foo_cellm(line, cell):
485 pass
516 pass
486
517
487 ip = get_ipython()
518 ip = get_ipython()
488 c = ip.Completer
519 c = ip.Completer
489
520
490 s, matches = c.complete(None, '_foo_ce')
521 s, matches = c.complete(None, '_foo_ce')
491 nt.assert_in('%%_foo_cellm', matches)
522 nt.assert_in('%%_foo_cellm', matches)
492 s, matches = c.complete(None, '%%_foo_ce')
523 s, matches = c.complete(None, '%%_foo_ce')
493 nt.assert_in('%%_foo_cellm', matches)
524 nt.assert_in('%%_foo_cellm', matches)
494
525
495
526
496 def test_line_cell_magics():
527 def test_line_cell_magics():
497 from IPython.core.magic import register_line_cell_magic
528 from IPython.core.magic import register_line_cell_magic
498
529
499 @register_line_cell_magic
530 @register_line_cell_magic
500 def _bar_cellm(line, cell):
531 def _bar_cellm(line, cell):
501 pass
532 pass
502
533
503 ip = get_ipython()
534 ip = get_ipython()
504 c = ip.Completer
535 c = ip.Completer
505
536
506 # The policy here is trickier, see comments in completion code. The
537 # The policy here is trickier, see comments in completion code. The
507 # returned values depend on whether the user passes %% or not explicitly,
538 # returned values depend on whether the user passes %% or not explicitly,
508 # and this will show a difference if the same name is both a line and cell
539 # and this will show a difference if the same name is both a line and cell
509 # magic.
540 # magic.
510 s, matches = c.complete(None, '_bar_ce')
541 s, matches = c.complete(None, '_bar_ce')
511 nt.assert_in('%_bar_cellm', matches)
542 nt.assert_in('%_bar_cellm', matches)
512 nt.assert_in('%%_bar_cellm', matches)
543 nt.assert_in('%%_bar_cellm', matches)
513 s, matches = c.complete(None, '%_bar_ce')
544 s, matches = c.complete(None, '%_bar_ce')
514 nt.assert_in('%_bar_cellm', matches)
545 nt.assert_in('%_bar_cellm', matches)
515 nt.assert_in('%%_bar_cellm', matches)
546 nt.assert_in('%%_bar_cellm', matches)
516 s, matches = c.complete(None, '%%_bar_ce')
547 s, matches = c.complete(None, '%%_bar_ce')
517 nt.assert_not_in('%_bar_cellm', matches)
548 nt.assert_not_in('%_bar_cellm', matches)
518 nt.assert_in('%%_bar_cellm', matches)
549 nt.assert_in('%%_bar_cellm', matches)
519
550
520
551
521 def test_magic_completion_order():
552 def test_magic_completion_order():
522
553
523 ip = get_ipython()
554 ip = get_ipython()
524 c = ip.Completer
555 c = ip.Completer
525
556
526 # Test ordering of magics and non-magics with the same name
557 # Test ordering of magics and non-magics with the same name
527 # We want the non-magic first
558 # We want the non-magic first
528
559
529 # Before importing matplotlib, there should only be one option:
560 # Before importing matplotlib, there should only be one option:
530
561
531 text, matches = c.complete('mat')
562 text, matches = c.complete('mat')
532 nt.assert_equal(matches, ["%matplotlib"])
563 nt.assert_equal(matches, ["%matplotlib"])
533
564
534
565
535 ip.run_cell("matplotlib = 1") # introduce name into namespace
566 ip.run_cell("matplotlib = 1") # introduce name into namespace
536
567
537 # After the import, there should be two options, ordered like this:
568 # After the import, there should be two options, ordered like this:
538 text, matches = c.complete('mat')
569 text, matches = c.complete('mat')
539 nt.assert_equal(matches, ["matplotlib", "%matplotlib"])
570 nt.assert_equal(matches, ["matplotlib", "%matplotlib"])
540
571
541
572
542 ip.run_cell("timeit = 1") # define a user variable called 'timeit'
573 ip.run_cell("timeit = 1") # define a user variable called 'timeit'
543
574
544 # Order of user variable and line and cell magics with same name:
575 # Order of user variable and line and cell magics with same name:
545 text, matches = c.complete('timeit')
576 text, matches = c.complete('timeit')
546 nt.assert_equal(matches, ["timeit", "%timeit", "%%timeit"])
577 nt.assert_equal(matches, ["timeit", "%timeit", "%%timeit"])
547
578
548 def test_match_dict_keys():
579 def test_match_dict_keys():
549 """
580 """
550 Test that match_dict_keys works on a couple of use cases, returns what is
581 Test that match_dict_keys works on a couple of use cases, returns what is
551 expected, and does not crash
582 expected, and does not crash
552 """
583 """
553 delims = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
584 delims = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
554
585
555
586
556 keys = ['foo', b'far']
587 keys = ['foo', b'far']
557 assert match_dict_keys(keys, "b'", delims=delims) == ("'", 2 ,['far'])
588 assert match_dict_keys(keys, "b'", delims=delims) == ("'", 2 ,['far'])
558 assert match_dict_keys(keys, "b'f", delims=delims) == ("'", 2 ,['far'])
589 assert match_dict_keys(keys, "b'f", delims=delims) == ("'", 2 ,['far'])
559 assert match_dict_keys(keys, 'b"', delims=delims) == ('"', 2 ,['far'])
590 assert match_dict_keys(keys, 'b"', delims=delims) == ('"', 2 ,['far'])
560 assert match_dict_keys(keys, 'b"f', delims=delims) == ('"', 2 ,['far'])
591 assert match_dict_keys(keys, 'b"f', delims=delims) == ('"', 2 ,['far'])
561
592
562 assert match_dict_keys(keys, "'", delims=delims) == ("'", 1 ,['foo'])
593 assert match_dict_keys(keys, "'", delims=delims) == ("'", 1 ,['foo'])
563 assert match_dict_keys(keys, "'f", delims=delims) == ("'", 1 ,['foo'])
594 assert match_dict_keys(keys, "'f", delims=delims) == ("'", 1 ,['foo'])
564 assert match_dict_keys(keys, '"', delims=delims) == ('"', 1 ,['foo'])
595 assert match_dict_keys(keys, '"', delims=delims) == ('"', 1 ,['foo'])
565 assert match_dict_keys(keys, '"f', delims=delims) == ('"', 1 ,['foo'])
596 assert match_dict_keys(keys, '"f', delims=delims) == ('"', 1 ,['foo'])
566
597
567 match_dict_keys
598 match_dict_keys
568
599
569
600
570 def test_dict_key_completion_string():
601 def test_dict_key_completion_string():
571 """Test dictionary key completion for string keys"""
602 """Test dictionary key completion for string keys"""
572 ip = get_ipython()
603 ip = get_ipython()
573 complete = ip.Completer.complete
604 complete = ip.Completer.complete
574
605
575 ip.user_ns['d'] = {'abc': None}
606 ip.user_ns['d'] = {'abc': None}
576
607
577 # check completion at different stages
608 # check completion at different stages
578 _, matches = complete(line_buffer="d[")
609 _, matches = complete(line_buffer="d[")
579 nt.assert_in("'abc'", matches)
610 nt.assert_in("'abc'", matches)
580 nt.assert_not_in("'abc']", matches)
611 nt.assert_not_in("'abc']", matches)
581
612
582 _, matches = complete(line_buffer="d['")
613 _, matches = complete(line_buffer="d['")
583 nt.assert_in("abc", matches)
614 nt.assert_in("abc", matches)
584 nt.assert_not_in("abc']", matches)
615 nt.assert_not_in("abc']", matches)
585
616
586 _, matches = complete(line_buffer="d['a")
617 _, matches = complete(line_buffer="d['a")
587 nt.assert_in("abc", matches)
618 nt.assert_in("abc", matches)
588 nt.assert_not_in("abc']", matches)
619 nt.assert_not_in("abc']", matches)
589
620
590 # check use of different quoting
621 # check use of different quoting
591 _, matches = complete(line_buffer="d[\"")
622 _, matches = complete(line_buffer="d[\"")
592 nt.assert_in("abc", matches)
623 nt.assert_in("abc", matches)
593 nt.assert_not_in('abc\"]', matches)
624 nt.assert_not_in('abc\"]', matches)
594
625
595 _, matches = complete(line_buffer="d[\"a")
626 _, matches = complete(line_buffer="d[\"a")
596 nt.assert_in("abc", matches)
627 nt.assert_in("abc", matches)
597 nt.assert_not_in('abc\"]', matches)
628 nt.assert_not_in('abc\"]', matches)
598
629
599 # check sensitivity to following context
630 # check sensitivity to following context
600 _, matches = complete(line_buffer="d[]", cursor_pos=2)
631 _, matches = complete(line_buffer="d[]", cursor_pos=2)
601 nt.assert_in("'abc'", matches)
632 nt.assert_in("'abc'", matches)
602
633
603 _, matches = complete(line_buffer="d['']", cursor_pos=3)
634 _, matches = complete(line_buffer="d['']", cursor_pos=3)
604 nt.assert_in("abc", matches)
635 nt.assert_in("abc", matches)
605 nt.assert_not_in("abc'", matches)
636 nt.assert_not_in("abc'", matches)
606 nt.assert_not_in("abc']", matches)
637 nt.assert_not_in("abc']", matches)
607
638
608 # check that multiple solutions are correctly returned and that noise is not included
639 # check that multiple solutions are correctly returned and that noise is not included
609 ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,
640 ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,
610 5: None}
641 5: None}
611
642
612 _, matches = complete(line_buffer="d['a")
643 _, matches = complete(line_buffer="d['a")
613 nt.assert_in("abc", matches)
644 nt.assert_in("abc", matches)
614 nt.assert_in("abd", matches)
645 nt.assert_in("abd", matches)
615 nt.assert_not_in("bad", matches)
646 nt.assert_not_in("bad", matches)
616 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
647 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
617
648
618 # check escaping and whitespace
649 # check escaping and whitespace
619 ip.user_ns['d'] = {'a\nb': None, 'a\'b': None, 'a"b': None, 'a word': None}
650 ip.user_ns['d'] = {'a\nb': None, 'a\'b': None, 'a"b': None, 'a word': None}
620 _, matches = complete(line_buffer="d['a")
651 _, matches = complete(line_buffer="d['a")
621 nt.assert_in("a\\nb", matches)
652 nt.assert_in("a\\nb", matches)
622 nt.assert_in("a\\'b", matches)
653 nt.assert_in("a\\'b", matches)
623 nt.assert_in("a\"b", matches)
654 nt.assert_in("a\"b", matches)
624 nt.assert_in("a word", matches)
655 nt.assert_in("a word", matches)
625 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
656 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
626
657
627 # - can complete on non-initial word of the string
658 # - can complete on non-initial word of the string
628 _, matches = complete(line_buffer="d['a w")
659 _, matches = complete(line_buffer="d['a w")
629 nt.assert_in("word", matches)
660 nt.assert_in("word", matches)
630
661
631 # - understands quote escaping
662 # - understands quote escaping
632 _, matches = complete(line_buffer="d['a\\'")
663 _, matches = complete(line_buffer="d['a\\'")
633 nt.assert_in("b", matches)
664 nt.assert_in("b", matches)
634
665
635 # - default quoting should work like repr
666 # - default quoting should work like repr
636 _, matches = complete(line_buffer="d[")
667 _, matches = complete(line_buffer="d[")
637 nt.assert_in("\"a'b\"", matches)
668 nt.assert_in("\"a'b\"", matches)
638
669
639 # - when opening quote with ", possible to match with unescaped apostrophe
670 # - when opening quote with ", possible to match with unescaped apostrophe
640 _, matches = complete(line_buffer="d[\"a'")
671 _, matches = complete(line_buffer="d[\"a'")
641 nt.assert_in("b", matches)
672 nt.assert_in("b", matches)
642
673
643 # need to not split at delims that readline won't split at
674 # need to not split at delims that readline won't split at
644 if '-' not in ip.Completer.splitter.delims:
675 if '-' not in ip.Completer.splitter.delims:
645 ip.user_ns['d'] = {'before-after': None}
676 ip.user_ns['d'] = {'before-after': None}
646 _, matches = complete(line_buffer="d['before-af")
677 _, matches = complete(line_buffer="d['before-af")
647 nt.assert_in('before-after', matches)
678 nt.assert_in('before-after', matches)
648
679
649 def test_dict_key_completion_contexts():
680 def test_dict_key_completion_contexts():
650 """Test expression contexts in which dict key completion occurs"""
681 """Test expression contexts in which dict key completion occurs"""
651 ip = get_ipython()
682 ip = get_ipython()
652 complete = ip.Completer.complete
683 complete = ip.Completer.complete
653 d = {'abc': None}
684 d = {'abc': None}
654 ip.user_ns['d'] = d
685 ip.user_ns['d'] = d
655
686
656 class C:
687 class C:
657 data = d
688 data = d
658 ip.user_ns['C'] = C
689 ip.user_ns['C'] = C
659 ip.user_ns['get'] = lambda: d
690 ip.user_ns['get'] = lambda: d
660
691
661 def assert_no_completion(**kwargs):
692 def assert_no_completion(**kwargs):
662 _, matches = complete(**kwargs)
693 _, matches = complete(**kwargs)
663 nt.assert_not_in('abc', matches)
694 nt.assert_not_in('abc', matches)
664 nt.assert_not_in('abc\'', matches)
695 nt.assert_not_in('abc\'', matches)
665 nt.assert_not_in('abc\']', matches)
696 nt.assert_not_in('abc\']', matches)
666 nt.assert_not_in('\'abc\'', matches)
697 nt.assert_not_in('\'abc\'', matches)
667 nt.assert_not_in('\'abc\']', matches)
698 nt.assert_not_in('\'abc\']', matches)
668
699
669 def assert_completion(**kwargs):
700 def assert_completion(**kwargs):
670 _, matches = complete(**kwargs)
701 _, matches = complete(**kwargs)
671 nt.assert_in("'abc'", matches)
702 nt.assert_in("'abc'", matches)
672 nt.assert_not_in("'abc']", matches)
703 nt.assert_not_in("'abc']", matches)
673
704
674 # no completion after string closed, even if reopened
705 # no completion after string closed, even if reopened
675 assert_no_completion(line_buffer="d['a'")
706 assert_no_completion(line_buffer="d['a'")
676 assert_no_completion(line_buffer="d[\"a\"")
707 assert_no_completion(line_buffer="d[\"a\"")
677 assert_no_completion(line_buffer="d['a' + ")
708 assert_no_completion(line_buffer="d['a' + ")
678 assert_no_completion(line_buffer="d['a' + '")
709 assert_no_completion(line_buffer="d['a' + '")
679
710
680 # completion in non-trivial expressions
711 # completion in non-trivial expressions
681 assert_completion(line_buffer="+ d[")
712 assert_completion(line_buffer="+ d[")
682 assert_completion(line_buffer="(d[")
713 assert_completion(line_buffer="(d[")
683 assert_completion(line_buffer="C.data[")
714 assert_completion(line_buffer="C.data[")
684
715
685 # greedy flag
716 # greedy flag
686 def assert_completion(**kwargs):
717 def assert_completion(**kwargs):
687 _, matches = complete(**kwargs)
718 _, matches = complete(**kwargs)
688 nt.assert_in("get()['abc']", matches)
719 nt.assert_in("get()['abc']", matches)
689
720
690 assert_no_completion(line_buffer="get()[")
721 assert_no_completion(line_buffer="get()[")
691 with greedy_completion():
722 with greedy_completion():
692 assert_completion(line_buffer="get()[")
723 assert_completion(line_buffer="get()[")
693 assert_completion(line_buffer="get()['")
724 assert_completion(line_buffer="get()['")
694 assert_completion(line_buffer="get()['a")
725 assert_completion(line_buffer="get()['a")
695 assert_completion(line_buffer="get()['ab")
726 assert_completion(line_buffer="get()['ab")
696 assert_completion(line_buffer="get()['abc")
727 assert_completion(line_buffer="get()['abc")
697
728
698
729
699
730
700 def test_dict_key_completion_bytes():
731 def test_dict_key_completion_bytes():
701 """Test handling of bytes in dict key completion"""
732 """Test handling of bytes in dict key completion"""
702 ip = get_ipython()
733 ip = get_ipython()
703 complete = ip.Completer.complete
734 complete = ip.Completer.complete
704
735
705 ip.user_ns['d'] = {'abc': None, b'abd': None}
736 ip.user_ns['d'] = {'abc': None, b'abd': None}
706
737
707 _, matches = complete(line_buffer="d[")
738 _, matches = complete(line_buffer="d[")
708 nt.assert_in("'abc'", matches)
739 nt.assert_in("'abc'", matches)
709 nt.assert_in("b'abd'", matches)
740 nt.assert_in("b'abd'", matches)
710
741
711 if False: # not currently implemented
742 if False: # not currently implemented
712 _, matches = complete(line_buffer="d[b")
743 _, matches = complete(line_buffer="d[b")
713 nt.assert_in("b'abd'", matches)
744 nt.assert_in("b'abd'", matches)
714 nt.assert_not_in("b'abc'", matches)
745 nt.assert_not_in("b'abc'", matches)
715
746
716 _, matches = complete(line_buffer="d[b'")
747 _, matches = complete(line_buffer="d[b'")
717 nt.assert_in("abd", matches)
748 nt.assert_in("abd", matches)
718 nt.assert_not_in("abc", matches)
749 nt.assert_not_in("abc", matches)
719
750
720 _, matches = complete(line_buffer="d[B'")
751 _, matches = complete(line_buffer="d[B'")
721 nt.assert_in("abd", matches)
752 nt.assert_in("abd", matches)
722 nt.assert_not_in("abc", matches)
753 nt.assert_not_in("abc", matches)
723
754
724 _, matches = complete(line_buffer="d['")
755 _, matches = complete(line_buffer="d['")
725 nt.assert_in("abc", matches)
756 nt.assert_in("abc", matches)
726 nt.assert_not_in("abd", matches)
757 nt.assert_not_in("abd", matches)
727
758
728
759
def test_dict_key_completion_unicode_py3():
    """Test handling of unicode in dict key completion"""
    ip = get_ipython()
    complete = ip.Completer.complete

    ip.user_ns['d'] = {u'a\u05d0': None}

    # query using escape
    if sys.platform != 'win32':
        # Known failure on Windows
        _, matches = complete(line_buffer="d['a\\u05d0")
        nt.assert_in("u05d0", matches)  # tokenized after \\

    # query using character
    _, matches = complete(line_buffer="d['a\u05d0")
    nt.assert_in(u"a\u05d0", matches)

    with greedy_completion():
        # query using escape
        _, matches = complete(line_buffer="d['a\\u05d0")
        nt.assert_in("d['a\\u05d0']", matches)  # tokenized after \\

        # query using character
        _, matches = complete(line_buffer="d['a\u05d0")
        nt.assert_in(u"d['a\u05d0']", matches)


@dec.skip_without('numpy')
def test_struct_array_key_completion():
    """Test dict key completion applies to numpy struct arrays"""
    import numpy
    ip = get_ipython()
    complete = ip.Completer.complete
    ip.user_ns['d'] = numpy.array([], dtype=[('hello', 'f'), ('world', 'f')])
    _, matches = complete(line_buffer="d['")
    nt.assert_in("hello", matches)
    nt.assert_in("world", matches)
    # complete on the numpy struct itself
    dt = numpy.dtype([('my_head', [('my_dt', '>u4'), ('my_df', '>u4')]),
                      ('my_data', '>f4', 5)])
    x = numpy.zeros(2, dtype=dt)
    ip.user_ns['d'] = x[1]
    _, matches = complete(line_buffer="d['")
    nt.assert_in("my_head", matches)
    nt.assert_in("my_data", matches)
    # complete on a nested level
    with greedy_completion():
        ip.user_ns['d'] = numpy.zeros(2, dtype=dt)
        _, matches = complete(line_buffer="d[1]['my_head']['")
        nt.assert_true(any(["my_dt" in m for m in matches]))
        nt.assert_true(any(["my_df" in m for m in matches]))


@dec.skip_without('pandas')
def test_dataframe_key_completion():
    """Test dict key completion applies to pandas DataFrames"""
    import pandas
    ip = get_ipython()
    complete = ip.Completer.complete
    ip.user_ns['d'] = pandas.DataFrame({'hello': [1], 'world': [2]})
    _, matches = complete(line_buffer="d['")
    nt.assert_in("hello", matches)
    nt.assert_in("world", matches)


def test_dict_key_completion_invalids():
    """Smoke test cases dict key completion can't handle"""
    ip = get_ipython()
    complete = ip.Completer.complete

    ip.user_ns['no_getitem'] = None
    ip.user_ns['no_keys'] = []
    ip.user_ns['cant_call_keys'] = dict
    ip.user_ns['empty'] = {}
    ip.user_ns['d'] = {'abc': 5}

    _, matches = complete(line_buffer="no_getitem['")
    _, matches = complete(line_buffer="no_keys['")
    _, matches = complete(line_buffer="cant_call_keys['")
    _, matches = complete(line_buffer="empty['")
    _, matches = complete(line_buffer="name_error['")
    _, matches = complete(line_buffer="d['\\")  # incomplete escape

class KeyCompletable(object):
    def __init__(self, things=()):
        self.things = things

    def _ipython_key_completions_(self):
        return list(self.things)


def test_object_key_completion():
    ip = get_ipython()
    ip.user_ns['key_completable'] = KeyCompletable(['qwerty', 'qwick'])

    _, matches = ip.Completer.complete(line_buffer="key_completable['qw")
    nt.assert_in('qwerty', matches)
    nt.assert_in('qwick', matches)

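The KeyCompletable helper above is essentially the whole protocol: an object advertises completable keys by defining _ipython_key_completions_ and returning an iterable of strings, which the completer then filters against the typed prefix (here 'qw'). As a hedged illustration, this is a small user-side sketch of the same hook on a mapping-like wrapper; the class name and stored values are invented for the example.

    class Settings(object):
        """Hypothetical wrapper whose keys should tab-complete like dict keys."""

        def __init__(self, values):
            self._values = dict(values)

        def __getitem__(self, key):
            return self._values[key]

        def _ipython_key_completions_(self):
            # Called by IPython when completing settings['<TAB>
            return list(self._values)

    # In an interactive session: settings['ti<TAB>  ->  offers 'timeout' and 'title'
    settings = Settings({'timeout': 30, 'title': 'demo'})
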
def test_tryimport():
    """
    Test that try_import does not crash on a trailing dot, and that it
    imports the module before the dot.
    """
    from IPython.core.completerlib import try_import
    assert try_import("IPython.")


def test_aimport_module_completer():
    ip = get_ipython()
    _, matches = ip.complete('i', '%aimport i')
    nt.assert_in('io', matches)
    nt.assert_not_in('int', matches)


def test_nested_import_module_completer():
    ip = get_ipython()
    _, matches = ip.complete(None, 'import IPython.co', 17)
    nt.assert_in('IPython.core', matches)
    nt.assert_not_in('import IPython.core', matches)
    nt.assert_not_in('IPython.display', matches)


def test_import_module_completer():
    ip = get_ipython()
    _, matches = ip.complete('i', 'import i')
    nt.assert_in('io', matches)
    nt.assert_not_in('int', matches)


def test_from_module_completer():
    ip = get_ipython()
    _, matches = ip.complete('B', 'from io import B', 16)
    nt.assert_in('BytesIO', matches)
    nt.assert_not_in('BaseException', matches)
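
The module-completer tests above all go through ip.complete(text, line, cursor_pos), which returns the matched text and the list of candidate completions. As a rough usage sketch (not part of the diff), the same call can be used to inspect completions programmatically from a running session; the show_completions helper is invented for the example.

    def show_completions(ip, text, line, cursor_pos=None):
        """Hypothetical helper: print the matches IPython would offer."""
        matched_text, matches = ip.complete(text, line, cursor_pos)
        print(matched_text, matches)

    # Mirrors the cases exercised above (run inside an IPython session):
    ip = get_ipython()
    show_completions(ip, 'i', 'import i')                 # 'io' but not 'int'
    show_completions(ip, None, 'import IPython.co', 17)   # 'IPython.core'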