Fix completions for PTK 1.0
Matthias Bussonnier
@@ -1,1332 +1,1335 @@
1 # encoding: utf-8
1 # encoding: utf-8
2 """Word completion for IPython.
2 """Word completion for IPython.
3
3
4 This module is a fork of the rlcompleter module in the Python standard
4 This module is a fork of the rlcompleter module in the Python standard
5 library. The original enhancements made to rlcompleter have been sent
5 library. The original enhancements made to rlcompleter have been sent
6 upstream and were accepted as of Python 2.3, but we need a lot more
6 upstream and were accepted as of Python 2.3, but we need a lot more
7 functionality specific to IPython, so this module will continue to live as an
7 functionality specific to IPython, so this module will continue to live as an
8 IPython-specific utility.
8 IPython-specific utility.
9
9
10 Original rlcompleter documentation:
10 Original rlcompleter documentation:
11
11
12 This requires the latest extension to the readline module. The completer
12 This requires the latest extension to the readline module. The completer
13 completes keywords, built-ins and globals in __main__; when completing
13 completes keywords, built-ins and globals in __main__; when completing
14 NAME.NAME..., it evaluates (!) the expression up to the last dot and
14 NAME.NAME..., it evaluates (!) the expression up to the last dot and
15 completes its attributes.
15 completes its attributes.
16
16
17 It's very cool to do "import string" type "string.", hit the
17 It's very cool to do "import string" type "string.", hit the
18 completion key (twice), and see the list of names defined by the
18 completion key (twice), and see the list of names defined by the
19 string module!
19 string module!
20
20
21 Tip: to use the tab key as the completion key, call
21 Tip: to use the tab key as the completion key, call
22
22
23 readline.parse_and_bind("tab: complete")
23 readline.parse_and_bind("tab: complete")
24
24
25 Notes:
25 Notes:
26
26
27 - Exceptions raised by the completer function are *ignored* (and
27 - Exceptions raised by the completer function are *ignored* (and
28 generally cause the completion to fail). This is a feature -- since
28 generally cause the completion to fail). This is a feature -- since
29 readline sets the tty device in raw (or cbreak) mode, printing a
29 readline sets the tty device in raw (or cbreak) mode, printing a
30 traceback wouldn't work well without some complicated hoopla to save,
30 traceback wouldn't work well without some complicated hoopla to save,
31 reset and restore the tty state.
31 reset and restore the tty state.
32
32
33 - The evaluation of the NAME.NAME... form may cause arbitrary
33 - The evaluation of the NAME.NAME... form may cause arbitrary
34 application defined code to be executed if an object with a
34 application defined code to be executed if an object with a
35 ``__getattr__`` hook is found. Since it is the responsibility of the
35 ``__getattr__`` hook is found. Since it is the responsibility of the
36 application (or the user) to enable this feature, I consider this an
36 application (or the user) to enable this feature, I consider this an
37 acceptable risk. More complicated expressions (e.g. function calls or
37 acceptable risk. More complicated expressions (e.g. function calls or
38 indexing operations) are *not* evaluated.
38 indexing operations) are *not* evaluated.
39
39
40 - GNU readline is also used by the built-in functions input() and
40 - GNU readline is also used by the built-in functions input() and
41 raw_input(), and thus these also benefit/suffer from the completer
41 raw_input(), and thus these also benefit/suffer from the completer
42 features. Clearly an interactive application can benefit by
42 features. Clearly an interactive application can benefit by
43 specifying its own completer function and using raw_input() for all
43 specifying its own completer function and using raw_input() for all
44 its input.
44 its input.
45
45
46 - When the original stdin is not a tty device, GNU readline is never
46 - When the original stdin is not a tty device, GNU readline is never
47 used, and this module (and the readline module) are silently inactive.
47 used, and this module (and the readline module) are silently inactive.
48 """
48 """
49
49
50 # Copyright (c) IPython Development Team.
50 # Copyright (c) IPython Development Team.
51 # Distributed under the terms of the Modified BSD License.
51 # Distributed under the terms of the Modified BSD License.
52 #
52 #
53 # Some of this code originated from rlcompleter in the Python standard library
53 # Some of this code originated from rlcompleter in the Python standard library
54 # Copyright (C) 2001 Python Software Foundation, www.python.org
54 # Copyright (C) 2001 Python Software Foundation, www.python.org
55
55
56 from __future__ import print_function
56 from __future__ import print_function
57
57
58 import __main__
58 import __main__
59 import glob
59 import glob
60 import inspect
60 import inspect
61 import itertools
61 import itertools
62 import keyword
62 import keyword
63 import os
63 import os
64 import re
64 import re
65 import sys
65 import sys
66 import unicodedata
66 import unicodedata
67 import string
67 import string
68
68
69 from traitlets.config.configurable import Configurable
69 from traitlets.config.configurable import Configurable
70 from IPython.core.error import TryNext
70 from IPython.core.error import TryNext
71 from IPython.core.inputsplitter import ESC_MAGIC
71 from IPython.core.inputsplitter import ESC_MAGIC
72 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
72 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
73 from IPython.utils import generics
73 from IPython.utils import generics
74 from IPython.utils.decorators import undoc
74 from IPython.utils.decorators import undoc
75 from IPython.utils.dir2 import dir2, get_real_method
75 from IPython.utils.dir2 import dir2, get_real_method
76 from IPython.utils.process import arg_split
76 from IPython.utils.process import arg_split
77 from IPython.utils.py3compat import builtin_mod, string_types, PY3, cast_unicode_py2
77 from IPython.utils.py3compat import builtin_mod, string_types, PY3, cast_unicode_py2
78 from traitlets import CBool, Enum
78 from traitlets import CBool, Enum
79
79
80 try:
80 try:
81 import jedi
81 import jedi
82 import jedi.api.helpers
82 import jedi.api.helpers
83 import jedi.parser.user_context
83 import jedi.parser.user_context
84 JEDI_INSTALLED = True
84 JEDI_INSTALLED = True
85 except ImportError:
85 except ImportError:
86 JEDI_INSTALLED = False
86 JEDI_INSTALLED = False
87
87
88 #-----------------------------------------------------------------------------
88 #-----------------------------------------------------------------------------
89 # Globals
89 # Globals
90 #-----------------------------------------------------------------------------
90 #-----------------------------------------------------------------------------
91
91
92 # Public API
92 # Public API
93 __all__ = ['Completer','IPCompleter']
93 __all__ = ['Completer','IPCompleter']
94
94
95 if sys.platform == 'win32':
95 if sys.platform == 'win32':
96 PROTECTABLES = ' '
96 PROTECTABLES = ' '
97 else:
97 else:
98 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
98 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
99
99
100
100
101 #-----------------------------------------------------------------------------
101 #-----------------------------------------------------------------------------
102 # Main functions and classes
102 # Main functions and classes
103 #-----------------------------------------------------------------------------
103 #-----------------------------------------------------------------------------
104
104
105 def has_open_quotes(s):
105 def has_open_quotes(s):
106 """Return whether a string has open quotes.
106 """Return whether a string has open quotes.
107
107
108 This simply checks whether the number of quote characters of either type in
108 This simply checks whether the number of quote characters of either type in
109 the string is odd.
109 the string is odd.
110
110
111 Returns
111 Returns
112 -------
112 -------
113 If there is an open quote, the quote character is returned. Else, return
113 If there is an open quote, the quote character is returned. Else, return
114 False.
114 False.
115 """
115 """
116 # We check " first, then ', so complex cases with nested quotes will get
116 # We check " first, then ', so complex cases with nested quotes will get
117 # the " to take precedence.
117 # the " to take precedence.
118 if s.count('"') % 2:
118 if s.count('"') % 2:
119 return '"'
119 return '"'
120 elif s.count("'") % 2:
120 elif s.count("'") % 2:
121 return "'"
121 return "'"
122 else:
122 else:
123 return False
123 return False
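For illustration only (not part of the file), the intended behavior of has_open_quotes:
>>> has_open_quotes('print("he')
'"'
>>> has_open_quotes("x = 'a' + 'b'")
False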
124
124
125
125
126 def protect_filename(s):
126 def protect_filename(s):
127 """Escape a string to protect certain characters."""
127 """Escape a string to protect certain characters."""
128
128
129 return "".join([(ch in PROTECTABLES and '\\' + ch or ch)
129 return "".join([(ch in PROTECTABLES and '\\' + ch or ch)
130 for ch in s])
130 for ch in s])
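A quick sketch of the escaping, assuming the non-Windows PROTECTABLES set (on win32 only spaces are escaped):
>>> protect_filename('my file (1).txt')
'my\\ file\\ \\(1\\).txt'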
131
131
132 def expand_user(path):
132 def expand_user(path):
133 """Expand '~'-style usernames in strings.
133 """Expand '~'-style usernames in strings.
134
134
135 This is similar to :func:`os.path.expanduser`, but it computes and returns
135 This is similar to :func:`os.path.expanduser`, but it computes and returns
136 extra information that will be useful if the input was being used in
136 extra information that will be useful if the input was being used in
137 computing completions, and you wish to return the completions with the
137 computing completions, and you wish to return the completions with the
138 original '~' instead of its expanded value.
138 original '~' instead of its expanded value.
139
139
140 Parameters
140 Parameters
141 ----------
141 ----------
142 path : str
142 path : str
143 String to be expanded. If no ~ is present, the output is the same as the
143 String to be expanded. If no ~ is present, the output is the same as the
144 input.
144 input.
145
145
146 Returns
146 Returns
147 -------
147 -------
148 newpath : str
148 newpath : str
149 Result of ~ expansion in the input path.
149 Result of ~ expansion in the input path.
150 tilde_expand : bool
150 tilde_expand : bool
151 Whether any expansion was performed or not.
151 Whether any expansion was performed or not.
152 tilde_val : str
152 tilde_val : str
153 The value that ~ was replaced with.
153 The value that ~ was replaced with.
154 """
154 """
155 # Default values
155 # Default values
156 tilde_expand = False
156 tilde_expand = False
157 tilde_val = ''
157 tilde_val = ''
158 newpath = path
158 newpath = path
159
159
160 if path.startswith('~'):
160 if path.startswith('~'):
161 tilde_expand = True
161 tilde_expand = True
162 rest = len(path)-1
162 rest = len(path)-1
163 newpath = os.path.expanduser(path)
163 newpath = os.path.expanduser(path)
164 if rest:
164 if rest:
165 tilde_val = newpath[:-rest]
165 tilde_val = newpath[:-rest]
166 else:
166 else:
167 tilde_val = newpath
167 tilde_val = newpath
168
168
169 return newpath, tilde_expand, tilde_val
169 return newpath, tilde_expand, tilde_val
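A hedged example of the returned triple, assuming HOME is /home/user:
>>> expand_user('~/notebooks')
('/home/user/notebooks', True, '/home/user')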
170
170
171
171
172 def compress_user(path, tilde_expand, tilde_val):
172 def compress_user(path, tilde_expand, tilde_val):
173 """Does the opposite of expand_user, with its outputs.
173 """Does the opposite of expand_user, with its outputs.
174 """
174 """
175 if tilde_expand:
175 if tilde_expand:
176 return path.replace(tilde_val, '~')
176 return path.replace(tilde_val, '~')
177 else:
177 else:
178 return path
178 return path
179
179
180
180
181
181
182 def completions_sorting_key(word):
182 def completions_sorting_key(word):
183 """key for sorting completions
183 """key for sorting completions
184
184
185 This does several things:
185 This does several things:
186
186
187 - Lowercase all completions, so they are sorted alphabetically with
187 - Lowercase all completions, so they are sorted alphabetically with
188 upper and lower case words mingled
188 upper and lower case words mingled
189 - Demote any completions starting with underscores to the end
189 - Demote any completions starting with underscores to the end
190 - Insert any %magic and %%cellmagic completions in the alphabetical order
190 - Insert any %magic and %%cellmagic completions in the alphabetical order
191 by their name
191 by their name
192 """
192 """
193 # Case insensitive sort
193 # Case insensitive sort
194 word = word.lower()
194 word = word.lower()
195
195
196 prio1, prio2 = 0, 0
196 prio1, prio2 = 0, 0
197
197
198 if word.startswith('__'):
198 if word.startswith('__'):
199 prio1 = 2
199 prio1 = 2
200 elif word.startswith('_'):
200 elif word.startswith('_'):
201 prio1 = 1
201 prio1 = 1
202
202
203 if word.endswith('='):
203 if word.endswith('='):
204 prio1 = -1
204 prio1 = -1
205
205
206 if word.startswith('%%'):
206 if word.startswith('%%'):
207 # If there's another % in there, this is something else, so leave it alone
207 # If there's another % in there, this is something else, so leave it alone
208 if not "%" in word[2:]:
208 if not "%" in word[2:]:
209 word = word[2:]
209 word = word[2:]
210 prio2 = 2
210 prio2 = 2
211 elif word.startswith('%'):
211 elif word.startswith('%'):
212 if not "%" in word[1:]:
212 if not "%" in word[1:]:
213 word = word[1:]
213 word = word[1:]
214 prio2 = 1
214 prio2 = 1
215
215
216 return prio1, word, prio2
216 return prio1, word, prio2
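Used as a sort key, magics interleave alphabetically by name while underscore names sink to the end; an illustrative run (not part of the file):
>>> sorted(['zip', '_private', '%bookmark', '__dunder'], key=completions_sorting_key)
['%bookmark', 'zip', '_private', '__dunder']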
217
217
218
218
219 @undoc
219 @undoc
220 class Bunch(object): pass
220 class Bunch(object): pass
221
221
222
222
223 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
223 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
224 GREEDY_DELIMS = ' =\r\n'
224 GREEDY_DELIMS = ' =\r\n'
225
225
226
226
227 class CompletionSplitter(object):
227 class CompletionSplitter(object):
228 """An object to split an input line in a manner similar to readline.
228 """An object to split an input line in a manner similar to readline.
229
229
230 By having our own implementation, we can expose readline-like completion in
230 By having our own implementation, we can expose readline-like completion in
231 a uniform manner to all frontends. This object only needs to be given the
231 a uniform manner to all frontends. This object only needs to be given the
232 line of text to be split and the cursor position on said line, and it
232 line of text to be split and the cursor position on said line, and it
233 returns the 'word' to be completed on at the cursor after splitting the
233 returns the 'word' to be completed on at the cursor after splitting the
234 entire line.
234 entire line.
235
235
236 What characters are used as splitting delimiters can be controlled by
236 What characters are used as splitting delimiters can be controlled by
237 setting the `delims` attribute (this is a property that internally
237 setting the `delims` attribute (this is a property that internally
238 automatically builds the necessary regular expression)"""
238 automatically builds the necessary regular expression)"""
239
239
240 # Private interface
240 # Private interface
241
241
242 # A string of delimiter characters. The default value makes sense for
242 # A string of delimiter characters. The default value makes sense for
243 # IPython's most typical usage patterns.
243 # IPython's most typical usage patterns.
244 _delims = DELIMS
244 _delims = DELIMS
245
245
246 # The expression (a normal string) to be compiled into a regular expression
246 # The expression (a normal string) to be compiled into a regular expression
247 # for actual splitting. We store it as an attribute mostly for ease of
247 # for actual splitting. We store it as an attribute mostly for ease of
248 # debugging, since this type of code can be so tricky to debug.
248 # debugging, since this type of code can be so tricky to debug.
249 _delim_expr = None
249 _delim_expr = None
250
250
251 # The regular expression that does the actual splitting
251 # The regular expression that does the actual splitting
252 _delim_re = None
252 _delim_re = None
253
253
254 def __init__(self, delims=None):
254 def __init__(self, delims=None):
255 delims = CompletionSplitter._delims if delims is None else delims
255 delims = CompletionSplitter._delims if delims is None else delims
256 self.delims = delims
256 self.delims = delims
257
257
258 @property
258 @property
259 def delims(self):
259 def delims(self):
260 """Return the string of delimiter characters."""
260 """Return the string of delimiter characters."""
261 return self._delims
261 return self._delims
262
262
263 @delims.setter
263 @delims.setter
264 def delims(self, delims):
264 def delims(self, delims):
265 """Set the delimiters for line splitting."""
265 """Set the delimiters for line splitting."""
266 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
266 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
267 self._delim_re = re.compile(expr)
267 self._delim_re = re.compile(expr)
268 self._delims = delims
268 self._delims = delims
269 self._delim_expr = expr
269 self._delim_expr = expr
270
270
271 def split_line(self, line, cursor_pos=None):
271 def split_line(self, line, cursor_pos=None):
272 """Split a line of text with a cursor at the given position.
272 """Split a line of text with a cursor at the given position.
273 """
273 """
274 l = line if cursor_pos is None else line[:cursor_pos]
274 l = line if cursor_pos is None else line[:cursor_pos]
275 return self._delim_re.split(l)[-1]
275 return self._delim_re.split(l)[-1]
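An illustrative call (note that '.' is deliberately not in DELIMS, so attribute chains stay whole):
>>> CompletionSplitter().split_line('print(os.pa')
'os.pa'
>>> CompletionSplitter().split_line('print(os.pa', cursor_pos=6)
''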
276
276
277
277
278 class Completer(Configurable):
278 class Completer(Configurable):
279
279
280 greedy = CBool(False, config=True,
280 greedy = CBool(False, config=True,
281 help="""Activate greedy completion
281 help="""Activate greedy completion
282 PENDING DEPRECATION. This is now mostly taken care of with Jedi.
282 PENDING DEPRECATION. This is now mostly taken care of with Jedi.
283
283
284 This will enable completion on elements of lists, results of function calls, etc.,
284 This will enable completion on elements of lists, results of function calls, etc.,
285 but can be unsafe because the code is actually evaluated on TAB.
285 but can be unsafe because the code is actually evaluated on TAB.
286 """
286 """
287 )
287 )
288
288
289
289
290 def __init__(self, namespace=None, global_namespace=None, **kwargs):
290 def __init__(self, namespace=None, global_namespace=None, **kwargs):
291 """Create a new completer for the command line.
291 """Create a new completer for the command line.
292
292
293 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
293 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
294
294
295 If unspecified, the default namespace where completions are performed
295 If unspecified, the default namespace where completions are performed
296 is __main__ (technically, __main__.__dict__). Namespaces should be
296 is __main__ (technically, __main__.__dict__). Namespaces should be
297 given as dictionaries.
297 given as dictionaries.
298
298
299 An optional second namespace can be given. This allows the completer
299 An optional second namespace can be given. This allows the completer
300 to handle cases where both the local and global scopes need to be
300 to handle cases where both the local and global scopes need to be
301 distinguished.
301 distinguished.
302
302
303 Completer instances should be used as the completion mechanism of
303 Completer instances should be used as the completion mechanism of
304 readline via the set_completer() call:
304 readline via the set_completer() call:
305
305
306 readline.set_completer(Completer(my_namespace).complete)
306 readline.set_completer(Completer(my_namespace).complete)
307 """
307 """
308
308
309 # Don't bind to namespace quite yet, but flag whether the user wants a
309 # Don't bind to namespace quite yet, but flag whether the user wants a
310 # specific namespace or to use __main__.__dict__. This will allow us
310 # specific namespace or to use __main__.__dict__. This will allow us
311 # to bind to __main__.__dict__ at completion time, not now.
311 # to bind to __main__.__dict__ at completion time, not now.
312 if namespace is None:
312 if namespace is None:
313 self.use_main_ns = 1
313 self.use_main_ns = 1
314 else:
314 else:
315 self.use_main_ns = 0
315 self.use_main_ns = 0
316 self.namespace = namespace
316 self.namespace = namespace
317
317
318 # The global namespace, if given, can be bound directly
318 # The global namespace, if given, can be bound directly
319 if global_namespace is None:
319 if global_namespace is None:
320 self.global_namespace = {}
320 self.global_namespace = {}
321 else:
321 else:
322 self.global_namespace = global_namespace
322 self.global_namespace = global_namespace
323
323
324 super(Completer, self).__init__(**kwargs)
324 super(Completer, self).__init__(**kwargs)
325
325
326 def complete(self, text, state):
326 def complete(self, text, state):
327 """Return the next possible completion for 'text'.
327 """Return the next possible completion for 'text'.
328
328
329 This is called successively with state == 0, 1, 2, ... until it
329 This is called successively with state == 0, 1, 2, ... until it
330 returns None. The completion should begin with 'text'.
330 returns None. The completion should begin with 'text'.
331
331
332 """
332 """
333 if self.use_main_ns:
333 if self.use_main_ns:
334 self.namespace = __main__.__dict__
334 self.namespace = __main__.__dict__
335
335
336 if state == 0:
336 if state == 0:
337 if "." in text:
337 if "." in text:
338 self.matches = self.attr_matches(text)
338 self.matches = self.attr_matches(text)
339 else:
339 else:
340 self.matches = self.global_matches(text)
340 self.matches = self.global_matches(text)
341 try:
341 try:
342 return self.matches[state]
342 return self.matches[state]
343 except IndexError:
343 except IndexError:
344 return None
344 return None
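A sketch of the readline-style state protocol; the exact matches depend on the namespaces, and the output below assumes Python 3 with only these two names starting with 'prin':
>>> c = Completer(namespace={'printer': None})
>>> c.complete('prin', 0), c.complete('prin', 1), c.complete('prin', 2)
('print', 'printer', None)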
345
345
346 def global_matches(self, text):
346 def global_matches(self, text):
347 """Compute matches when text is a simple name.
347 """Compute matches when text is a simple name.
348
348
349 Return a list of all keywords, built-in functions and names currently
349 Return a list of all keywords, built-in functions and names currently
350 defined in self.namespace or self.global_namespace that match.
350 defined in self.namespace or self.global_namespace that match.
351
351
352 """
352 """
353 matches = []
353 matches = []
354 match_append = matches.append
354 match_append = matches.append
355 n = len(text)
355 n = len(text)
356 for lst in [keyword.kwlist,
356 for lst in [keyword.kwlist,
357 builtin_mod.__dict__.keys(),
357 builtin_mod.__dict__.keys(),
358 self.namespace.keys(),
358 self.namespace.keys(),
359 self.global_namespace.keys()]:
359 self.global_namespace.keys()]:
360 for word in lst:
360 for word in lst:
361 if word[:n] == text and word != "__builtins__":
361 if word[:n] == text and word != "__builtins__":
362 match_append(word)
362 match_append(word)
363 return [cast_unicode_py2(m) for m in matches]
363 return [cast_unicode_py2(m) for m in matches]
364
364
365 def attr_matches(self, text):
365 def attr_matches(self, text):
366 """Compute matches when text contains a dot.
366 """Compute matches when text contains a dot.
367
367
368 Assuming the text is of the form NAME.NAME....[NAME], and is
368 Assuming the text is of the form NAME.NAME....[NAME], and is
369 evaluatable in self.namespace or self.global_namespace, it will be
369 evaluatable in self.namespace or self.global_namespace, it will be
370 evaluated and its attributes (as revealed by dir()) are used as
370 evaluated and its attributes (as revealed by dir()) are used as
371 possible completions. (For class instances, class members are
371 possible completions. (For class instances, class members are
372 also considered.)
372 also considered.)
373
373
374 WARNING: this can still invoke arbitrary C code, if an object
374 WARNING: this can still invoke arbitrary C code, if an object
375 with a __getattr__ hook is evaluated.
375 with a __getattr__ hook is evaluated.
376
376
377 """
377 """
378
378
379 # Another option, seems to work great. Catches things like ''.<tab>
379 # Another option, seems to work great. Catches things like ''.<tab>
380 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
380 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
381
381
382 if m:
382 if m:
383 expr, attr = m.group(1, 3)
383 expr, attr = m.group(1, 3)
384 elif self.greedy:
384 elif self.greedy:
385 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
385 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
386 if not m2:
386 if not m2:
387 return []
387 return []
388 expr, attr = m2.group(1,2)
388 expr, attr = m2.group(1,2)
389 else:
389 else:
390 return []
390 return []
391
391
392 try:
392 try:
393 obj = eval(expr, self.namespace)
393 obj = eval(expr, self.namespace)
394 except:
394 except:
395 try:
395 try:
396 obj = eval(expr, self.global_namespace)
396 obj = eval(expr, self.global_namespace)
397 except:
397 except:
398 return []
398 return []
399
399
400 if self.limit_to__all__ and hasattr(obj, '__all__'):
400 if self.limit_to__all__ and hasattr(obj, '__all__'):
401 words = get__all__entries(obj)
401 words = get__all__entries(obj)
402 else:
402 else:
403 words = dir2(obj)
403 words = dir2(obj)
404
404
405 try:
405 try:
406 words = generics.complete_object(obj, words)
406 words = generics.complete_object(obj, words)
407 except TryNext:
407 except TryNext:
408 pass
408 pass
409 except Exception:
409 except Exception:
410 # Silence errors from completion function
410 # Silence errors from completion function
411 #raise # dbg
411 #raise # dbg
412 pass
412 pass
413 # Build match list to return
413 # Build match list to return
414 n = len(attr)
414 n = len(attr)
415 return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr ]
415 return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr ]
416
416
417
417
418 def get__all__entries(obj):
418 def get__all__entries(obj):
419 """returns the strings in the __all__ attribute"""
419 """returns the strings in the __all__ attribute"""
420 try:
420 try:
421 words = getattr(obj, '__all__')
421 words = getattr(obj, '__all__')
422 except:
422 except:
423 return []
423 return []
424
424
425 return [cast_unicode_py2(w) for w in words if isinstance(w, string_types)]
425 return [cast_unicode_py2(w) for w in words if isinstance(w, string_types)]
426
426
427
427
428 def match_dict_keys(keys, prefix, delims):
428 def match_dict_keys(keys, prefix, delims):
429 """Used by dict_key_matches, matching the prefix to a list of keys"""
429 """Used by dict_key_matches, matching the prefix to a list of keys"""
430 if not prefix:
430 if not prefix:
431 return None, 0, [repr(k) for k in keys
431 return None, 0, [repr(k) for k in keys
432 if isinstance(k, (string_types, bytes))]
432 if isinstance(k, (string_types, bytes))]
433 quote_match = re.search('["\']', prefix)
433 quote_match = re.search('["\']', prefix)
434 quote = quote_match.group()
434 quote = quote_match.group()
435 try:
435 try:
436 prefix_str = eval(prefix + quote, {})
436 prefix_str = eval(prefix + quote, {})
437 except Exception:
437 except Exception:
438 return None, 0, []
438 return None, 0, []
439
439
440 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
440 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
441 token_match = re.search(pattern, prefix, re.UNICODE)
441 token_match = re.search(pattern, prefix, re.UNICODE)
442 token_start = token_match.start()
442 token_start = token_match.start()
443 token_prefix = token_match.group()
443 token_prefix = token_match.group()
444
444
445 # TODO: support bytes in Py3k
445 # TODO: support bytes in Py3k
446 matched = []
446 matched = []
447 for key in keys:
447 for key in keys:
448 try:
448 try:
449 if not key.startswith(prefix_str):
449 if not key.startswith(prefix_str):
450 continue
450 continue
451 except (AttributeError, TypeError, UnicodeError):
451 except (AttributeError, TypeError, UnicodeError):
452 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
452 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
453 continue
453 continue
454
454
455 # reformat remainder of key to begin with prefix
455 # reformat remainder of key to begin with prefix
456 rem = key[len(prefix_str):]
456 rem = key[len(prefix_str):]
457 # force repr wrapped in '
457 # force repr wrapped in '
458 rem_repr = repr(rem + '"')
458 rem_repr = repr(rem + '"')
459 if rem_repr.startswith('u') and prefix[0] not in 'uU':
459 if rem_repr.startswith('u') and prefix[0] not in 'uU':
460 # Found key is unicode, but prefix is Py2 string.
460 # Found key is unicode, but prefix is Py2 string.
461 # Therefore attempt to interpret key as string.
461 # Therefore attempt to interpret key as string.
462 try:
462 try:
463 rem_repr = repr(rem.encode('ascii') + '"')
463 rem_repr = repr(rem.encode('ascii') + '"')
464 except UnicodeEncodeError:
464 except UnicodeEncodeError:
465 continue
465 continue
466
466
467 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
467 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
468 if quote == '"':
468 if quote == '"':
469 # The entered prefix is quoted with ",
469 # The entered prefix is quoted with ",
470 # but the match is quoted with '.
470 # but the match is quoted with '.
471 # A contained " hence needs escaping for comparison:
471 # A contained " hence needs escaping for comparison:
472 rem_repr = rem_repr.replace('"', '\\"')
472 rem_repr = rem_repr.replace('"', '\\"')
473
473
474 # then reinsert prefix from start of token
474 # then reinsert prefix from start of token
475 matched.append('%s%s' % (token_prefix, rem_repr))
475 matched.append('%s%s' % (token_prefix, rem_repr))
476 return quote, token_start, matched
476 return quote, token_start, matched
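Two hedged examples of the return shape (quote used, token start offset, and the rendered matches); exact escaping depends on the keys involved:
>>> match_dict_keys(['foo', 'bar'], '', DELIMS)
(None, 0, ["'foo'", "'bar'"])
>>> match_dict_keys(['foo', 'bar'], "'f", DELIMS)
("'", 1, ['foo'])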
477
477
478
478
479 def _safe_isinstance(obj, module, class_name):
479 def _safe_isinstance(obj, module, class_name):
480 """Checks if obj is an instance of module.class_name if loaded
480 """Checks if obj is an instance of module.class_name if loaded
481 """
481 """
482 return (module in sys.modules and
482 return (module in sys.modules and
483 isinstance(obj, getattr(__import__(module), class_name)))
483 isinstance(obj, getattr(__import__(module), class_name)))
484
484
485
485
486 def back_unicode_name_matches(text):
486 def back_unicode_name_matches(text):
487 u"""Match unicode characters back to unicode name
487 u"""Match unicode characters back to unicode name
488
488
489 This does ☃ -> \\snowman
489 This does ☃ -> \\snowman
490
490
491 Note that snowman is not a valid Python 3 combining character, but it will still be expanded.
491 Note that snowman is not a valid Python 3 combining character, but it will still be expanded.
492 It will not, however, be recombined back into the snowman character by the completion machinery.
492 It will not, however, be recombined back into the snowman character by the completion machinery.
493
493
494 Nor will this back-complete standard escape sequences like \\n, \\b ...
494 Nor will this back-complete standard escape sequences like \\n, \\b ...
495
495
496 Used on Python 3 only.
496 Used on Python 3 only.
497 """
497 """
498 if len(text)<2:
498 if len(text)<2:
499 return u'', ()
499 return u'', ()
500 maybe_slash = text[-2]
500 maybe_slash = text[-2]
501 if maybe_slash != '\\':
501 if maybe_slash != '\\':
502 return u'', ()
502 return u'', ()
503
503
504 char = text[-1]
504 char = text[-1]
505 # no expand on quote for completion in strings.
505 # no expand on quote for completion in strings.
506 # nor backcomplete standard ascii keys
506 # nor backcomplete standard ascii keys
507 if char in string.ascii_letters or char in ['"',"'"]:
507 if char in string.ascii_letters or char in ['"',"'"]:
508 return u'', ()
508 return u'', ()
509 try :
509 try :
510 unic = unicodedata.name(char)
510 unic = unicodedata.name(char)
511 return '\\'+char,['\\'+unic]
511 return '\\'+char,['\\'+unic]
512 except KeyError as e:
512 except KeyError as e:
513 pass
513 pass
514 return u'', ()
514 return u'', ()
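An illustrative call on Python 3 (unicodedata names are upper-case, so the snowman comes back as SNOWMAN):
>>> back_unicode_name_matches('\\☃')
('\\☃', ['\\SNOWMAN'])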
515
515
516 def back_latex_name_matches(text):
516 def back_latex_name_matches(text):
517 u"""Match latex characters back to unicode name
517 u"""Match latex characters back to unicode name
518
518
519 This does √ -> \\sqrt
519 This does √ -> \\sqrt
520
520
521 Used on Python 3 only.
521 Used on Python 3 only.
522 """
522 """
523 if len(text)<2:
523 if len(text)<2:
524 return u'', ()
524 return u'', ()
525 maybe_slash = text[-2]
525 maybe_slash = text[-2]
526 if maybe_slash != '\\':
526 if maybe_slash != '\\':
527 return u'', ()
527 return u'', ()
528
528
529
529
530 char = text[-1]
530 char = text[-1]
531 # no expand on quote for completion in strings.
531 # no expand on quote for completion in strings.
532 # nor backcomplete standard ascii keys
532 # nor backcomplete standard ascii keys
533 if char in string.ascii_letters or char in ['"',"'"]:
533 if char in string.ascii_letters or char in ['"',"'"]:
534 return u'', ()
534 return u'', ()
535 try :
535 try :
536 latex = reverse_latex_symbol[char]
536 latex = reverse_latex_symbol[char]
537 # '\\' replaces the \ as well
537 # '\\' replaces the \ as well
538 return '\\'+char,[latex]
538 return '\\'+char,[latex]
539 except KeyError as e:
539 except KeyError as e:
540 pass
540 pass
541 return u'', ()
541 return u'', ()
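An illustrative call, assuming reverse_latex_symbol maps '√' back to '\\sqrt':
>>> back_latex_name_matches('\\√')
('\\√', ['\\sqrt'])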
542
542
543
543
544 class IPCompleter(Completer):
544 class IPCompleter(Completer):
545 """Extension of the completer class with IPython-specific features"""
545 """Extension of the completer class with IPython-specific features"""
546
546
547 def _greedy_changed(self, name, old, new):
547 def _greedy_changed(self, name, old, new):
548 """update the splitter and readline delims when greedy is changed"""
548 """update the splitter and readline delims when greedy is changed"""
549 if new:
549 if new:
550 self.splitter.delims = GREEDY_DELIMS
550 self.splitter.delims = GREEDY_DELIMS
551 else:
551 else:
552 self.splitter.delims = DELIMS
552 self.splitter.delims = DELIMS
553
553
554 if self.readline:
554 if self.readline:
555 self.readline.set_completer_delims(self.splitter.delims)
555 self.readline.set_completer_delims(self.splitter.delims)
556
556
557 merge_completions = CBool(True, config=True,
557 merge_completions = CBool(True, config=True,
558 help="""Whether to merge completion results into a single list
558 help="""Whether to merge completion results into a single list
559
559
560 If False, only the completion results from the first non-empty
560 If False, only the completion results from the first non-empty
561 completer will be returned.
561 completer will be returned.
562 """
562 """
563 )
563 )
564 omit__names = Enum((0,1,2), default_value=2, config=True,
564 omit__names = Enum((0,1,2), default_value=2, config=True,
565 help="""Instruct the completer to omit private method names
565 help="""Instruct the completer to omit private method names
566
566
567 Specifically, when completing on ``object.<tab>``.
567 Specifically, when completing on ``object.<tab>``.
568
568
569 When 2 [default]: all names that start with '_' will be excluded.
569 When 2 [default]: all names that start with '_' will be excluded.
570
570
571 When 1: all 'magic' names (``__foo__``) will be excluded.
571 When 1: all 'magic' names (``__foo__``) will be excluded.
572
572
573 When 0: nothing will be excluded.
573 When 0: nothing will be excluded.
574 """
574 """
575 )
575 )
576 limit_to__all__ = CBool(default_value=False, config=True,
576 limit_to__all__ = CBool(default_value=False, config=True,
577 help="""
577 help="""
578 DEPRECATED as of version 5.0.
578 DEPRECATED as of version 5.0.
579
579
580 Instruct the completer to use __all__ for the completion
580 Instruct the completer to use __all__ for the completion
581
581
582 Specifically, when completing on ``object.<tab>``.
582 Specifically, when completing on ``object.<tab>``.
583
583
584 When True: only those names in obj.__all__ will be included.
584 When True: only those names in obj.__all__ will be included.
585
585
586 When False [default]: the __all__ attribute is ignored
586 When False [default]: the __all__ attribute is ignored
587 """
587 """
588 )
588 )
589 use_jedi_completions = CBool(default_value=JEDI_INSTALLED, config=True,
589 use_jedi_completions = CBool(default_value=JEDI_INSTALLED, config=True,
590 help="""Use Jedi to generate autocompletions.
590 help="""Use Jedi to generate autocompletions.
591 """)
591 """)
592
592
593 def __init__(self, shell=None, namespace=None, global_namespace=None,
593 def __init__(self, shell=None, namespace=None, global_namespace=None,
594 use_readline=True, config=None, **kwargs):
594 use_readline=True, config=None, **kwargs):
595 """IPCompleter() -> completer
595 """IPCompleter() -> completer
596
596
597 Return a completer object suitable for use by the readline library
597 Return a completer object suitable for use by the readline library
598 via readline.set_completer().
598 via readline.set_completer().
599
599
600 Inputs:
600 Inputs:
601
601
602 - shell: a pointer to the ipython shell itself. This is needed
602 - shell: a pointer to the ipython shell itself. This is needed
603 because this completer knows about magic functions, and those can
603 because this completer knows about magic functions, and those can
604 only be accessed via the ipython instance.
604 only be accessed via the ipython instance.
605
605
606 - namespace: an optional dict where completions are performed.
606 - namespace: an optional dict where completions are performed.
607
607
608 - global_namespace: secondary optional dict for completions, to
608 - global_namespace: secondary optional dict for completions, to
609 handle cases (such as IPython embedded inside functions) where
609 handle cases (such as IPython embedded inside functions) where
610 both Python scopes are visible.
610 both Python scopes are visible.
611
611
612 use_readline : bool, optional
612 use_readline : bool, optional
613 If true, use the readline library. This completer can still function
613 If true, use the readline library. This completer can still function
614 without readline, though in that case callers must provide some extra
614 without readline, though in that case callers must provide some extra
615 information on each call about the current line."""
615 information on each call about the current line."""
616
616
617 self.magic_escape = ESC_MAGIC
617 self.magic_escape = ESC_MAGIC
618 self.splitter = CompletionSplitter()
618 self.splitter = CompletionSplitter()
619
619
620 # Readline configuration, only used by the rlcompleter method.
620 # Readline configuration, only used by the rlcompleter method.
621 if use_readline:
621 if use_readline:
622 # We store the right version of readline so that later code can use it
622 # We store the right version of readline so that later code can use it
623 import IPython.utils.rlineimpl as readline
623 import IPython.utils.rlineimpl as readline
624 self.readline = readline
624 self.readline = readline
625 else:
625 else:
626 self.readline = None
626 self.readline = None
627
627
628 # _greedy_changed() depends on splitter and readline being defined:
628 # _greedy_changed() depends on splitter and readline being defined:
629 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
629 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
630 config=config, **kwargs)
630 config=config, **kwargs)
631
631
632 # List where completion matches will be stored
632 # List where completion matches will be stored
633 self.matches = []
633 self.matches = []
634 self.shell = shell
634 self.shell = shell
635 # Regexp to split filenames with spaces in them
635 # Regexp to split filenames with spaces in them
636 self.space_name_re = re.compile(r'([^\\] )')
636 self.space_name_re = re.compile(r'([^\\] )')
637 # Hold a local ref. to glob.glob for speed
637 # Hold a local ref. to glob.glob for speed
638 self.glob = glob.glob
638 self.glob = glob.glob
639
639
640 # Determine if we are running on 'dumb' terminals, like (X)Emacs
640 # Determine if we are running on 'dumb' terminals, like (X)Emacs
641 # buffers, to avoid completion problems.
641 # buffers, to avoid completion problems.
642 term = os.environ.get('TERM','xterm')
642 term = os.environ.get('TERM','xterm')
643 self.dumb_terminal = term in ['dumb','emacs']
643 self.dumb_terminal = term in ['dumb','emacs']
644
644
645 # Special handling of backslashes needed in win32 platforms
645 # Special handling of backslashes needed in win32 platforms
646 if sys.platform == "win32":
646 if sys.platform == "win32":
647 self.clean_glob = self._clean_glob_win32
647 self.clean_glob = self._clean_glob_win32
648 else:
648 else:
649 self.clean_glob = self._clean_glob
649 self.clean_glob = self._clean_glob
650
650
651 #regexp to parse docstring for function signature
651 #regexp to parse docstring for function signature
652 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
652 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
653 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
653 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
654 #use this if positional argument name is also needed
654 #use this if positional argument name is also needed
655 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
655 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
656
656
657 # All active matcher routines for completion
657 # All active matcher routines for completion
658 self.matchers = [
658 self.matchers = [
659 self.file_matches,
659 self.file_matches,
660 self.magic_matches,
660 self.magic_matches,
661 self.python_func_kw_matches,
661 self.python_func_kw_matches,
662 self.dict_key_matches,
662 self.dict_key_matches,
663 ]
663 ]
664
664
665 def all_completions(self, text):
665 def all_completions(self, text):
666 """
666 """
667 Wrapper around the complete method for the benefit of emacs
667 Wrapper around the complete method for the benefit of emacs
668 and pydb.
668 and pydb.
669 """
669 """
670 return self.complete(text)[1]
670 return self.complete(text)[1]
671
671
672 def _clean_glob(self, text):
672 def _clean_glob(self, text):
673 return self.glob("%s*" % text)
673 return self.glob("%s*" % text)
674
674
675 def _clean_glob_win32(self,text):
675 def _clean_glob_win32(self,text):
676 return [f.replace("\\","/")
676 return [f.replace("\\","/")
677 for f in self.glob("%s*" % text)]
677 for f in self.glob("%s*" % text)]
678
678
679 def file_matches(self, text):
679 def file_matches(self, text):
680 """Match filenames, expanding ~USER type strings.
680 """Match filenames, expanding ~USER type strings.
681
681
682 Most of the seemingly convoluted logic in this completer is an
682 Most of the seemingly convoluted logic in this completer is an
683 attempt to handle filenames with spaces in them. And yet it's not
683 attempt to handle filenames with spaces in them. And yet it's not
684 quite perfect, because Python's readline doesn't expose all of the
684 quite perfect, because Python's readline doesn't expose all of the
685 GNU readline details needed for this to be done correctly.
685 GNU readline details needed for this to be done correctly.
686
686
687 For a filename with a space in it, the printed completions will be
687 For a filename with a space in it, the printed completions will be
688 only the parts after what's already been typed (instead of the
688 only the parts after what's already been typed (instead of the
689 full completions, as is normally done). I don't think with the
689 full completions, as is normally done). I don't think with the
690 current (as of Python 2.3) Python readline it's possible to do
690 current (as of Python 2.3) Python readline it's possible to do
691 better."""
691 better."""
692
692
693 # chars that require escaping with backslash - i.e. chars
693 # chars that require escaping with backslash - i.e. chars
694 # that readline treats incorrectly as delimiters, but we
694 # that readline treats incorrectly as delimiters, but we
695 # don't want to treat as delimiters in filename matching
695 # don't want to treat as delimiters in filename matching
696 # when escaped with backslash
696 # when escaped with backslash
697 if text.startswith('!'):
697 if text.startswith('!'):
698 text = text[1:]
698 text = text[1:]
699 text_prefix = u'!'
699 text_prefix = u'!'
700 else:
700 else:
701 text_prefix = u''
701 text_prefix = u''
702
702
703 text_until_cursor = self.text_until_cursor
703 text_until_cursor = self.text_until_cursor
704 # track strings with open quotes
704 # track strings with open quotes
705 open_quotes = has_open_quotes(text_until_cursor)
705 open_quotes = has_open_quotes(text_until_cursor)
706
706
707 if '(' in text_until_cursor or '[' in text_until_cursor:
707 if '(' in text_until_cursor or '[' in text_until_cursor:
708 lsplit = text
708 lsplit = text
709 else:
709 else:
710 try:
710 try:
711 # arg_split ~ shlex.split, but with unicode bugs fixed by us
711 # arg_split ~ shlex.split, but with unicode bugs fixed by us
712 lsplit = arg_split(text_until_cursor)[-1]
712 lsplit = arg_split(text_until_cursor)[-1]
713 except ValueError:
713 except ValueError:
714 # typically an unmatched ", or backslash without escaped char.
714 # typically an unmatched ", or backslash without escaped char.
715 if open_quotes:
715 if open_quotes:
716 lsplit = text_until_cursor.split(open_quotes)[-1]
716 lsplit = text_until_cursor.split(open_quotes)[-1]
717 else:
717 else:
718 return []
718 return []
719 except IndexError:
719 except IndexError:
720 # tab pressed on empty line
720 # tab pressed on empty line
721 lsplit = ""
721 lsplit = ""
722
722
723 if not open_quotes and lsplit != protect_filename(lsplit):
723 if not open_quotes and lsplit != protect_filename(lsplit):
724 # if protectables are found, do matching on the whole escaped name
724 # if protectables are found, do matching on the whole escaped name
725 has_protectables = True
725 has_protectables = True
726 text0,text = text,lsplit
726 text0,text = text,lsplit
727 else:
727 else:
728 has_protectables = False
728 has_protectables = False
729 text = os.path.expanduser(text)
729 text = os.path.expanduser(text)
730
730
731 if text == "":
731 if text == "":
732 return [text_prefix + cast_unicode_py2(protect_filename(f)) for f in self.glob("*")]
732 return [text_prefix + cast_unicode_py2(protect_filename(f)) for f in self.glob("*")]
733
733
734 # Compute the matches from the filesystem
734 # Compute the matches from the filesystem
735 m0 = self.clean_glob(text.replace('\\',''))
735 m0 = self.clean_glob(text.replace('\\',''))
736
736
737 if has_protectables:
737 if has_protectables:
738 # If we had protectables, we need to revert our changes to the
738 # If we had protectables, we need to revert our changes to the
739 # beginning of filename so that we don't double-write the part
739 # beginning of filename so that we don't double-write the part
740 # of the filename we have so far
740 # of the filename we have so far
741 len_lsplit = len(lsplit)
741 len_lsplit = len(lsplit)
742 matches = [text_prefix + text0 +
742 matches = [text_prefix + text0 +
743 protect_filename(f[len_lsplit:]) for f in m0]
743 protect_filename(f[len_lsplit:]) for f in m0]
744 else:
744 else:
745 if open_quotes:
745 if open_quotes:
746 # if we have a string with an open quote, we don't need to
746 # if we have a string with an open quote, we don't need to
747 # protect the names at all (and we _shouldn't_, as it
747 # protect the names at all (and we _shouldn't_, as it
748 # would cause bugs when the filesystem call is made).
748 # would cause bugs when the filesystem call is made).
749 matches = m0
749 matches = m0
750 else:
750 else:
751 matches = [text_prefix +
751 matches = [text_prefix +
752 protect_filename(f) for f in m0]
752 protect_filename(f) for f in m0]
753
753
754 # Mark directories in input list by appending '/' to their names.
754 # Mark directories in input list by appending '/' to their names.
755 return [cast_unicode_py2(x+'/') if os.path.isdir(x) else x for x in matches]
755 return [cast_unicode_py2(x+'/') if os.path.isdir(x) else x for x in matches]
756
756
757 def magic_matches(self, text):
757 def magic_matches(self, text):
758 """Match magics"""
758 """Match magics"""
759 # Get all shell magics now rather than statically, so magics loaded at
759 # Get all shell magics now rather than statically, so magics loaded at
760 # runtime show up too.
760 # runtime show up too.
761 lsm = self.shell.magics_manager.lsmagic()
761 lsm = self.shell.magics_manager.lsmagic()
762 line_magics = lsm['line']
762 line_magics = lsm['line']
763 cell_magics = lsm['cell']
763 cell_magics = lsm['cell']
764 pre = self.magic_escape
764 pre = self.magic_escape
765 pre2 = pre+pre
765 pre2 = pre+pre
766
766
767 # Completion logic:
767 # Completion logic:
768 # - user gives %%: only do cell magics
768 # - user gives %%: only do cell magics
769 # - user gives %: do both line and cell magics
769 # - user gives %: do both line and cell magics
770 # - no prefix: do both
770 # - no prefix: do both
771 # In other words, line magics are skipped if the user gives %% explicitly
771 # In other words, line magics are skipped if the user gives %% explicitly
772 bare_text = text.lstrip(pre)
772 bare_text = text.lstrip(pre)
773 comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
773 comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
774 if not text.startswith(pre2):
774 if not text.startswith(pre2):
775 comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
775 comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
776 return [cast_unicode_py2(c) for c in comp]
776 return [cast_unicode_py2(c) for c in comp]
777
777
778 def python_jedi_matches(self, text, line_buffer, cursor_pos):
778 def python_jedi_matches(self, text, line_buffer, cursor_pos):
779 """Match attributes or global Python names using Jedi."""
779 """Match attributes or global Python names using Jedi."""
780 if line_buffer.startswith('aimport ') or line_buffer.startswith('%aimport '):
780 if line_buffer.startswith('aimport ') or line_buffer.startswith('%aimport '):
781 return ()
781 return ()
782 namespaces = []
782 namespaces = []
783 if self.namespace is None:
783 if self.namespace is None:
784 import __main__
784 import __main__
785 namespaces.append(__main__.__dict__)
785 namespaces.append(__main__.__dict__)
786 else:
786 else:
787 namespaces.append(self.namespace)
787 namespaces.append(self.namespace)
788 if self.global_namespace is not None:
788 if self.global_namespace is not None:
789 namespaces.append(self.global_namespace)
789 namespaces.append(self.global_namespace)
790
790
791 # cursor_pos is an int, jedi wants line and column
791 # cursor_pos is an int, jedi wants line and column
792
792
793 interpreter = jedi.Interpreter(line_buffer, namespaces, column=cursor_pos)
793 interpreter = jedi.Interpreter(line_buffer, namespaces, column=cursor_pos)
794 path = jedi.parser.user_context.UserContext(line_buffer, \
794 path = jedi.parser.user_context.UserContext(line_buffer, \
795 (1, len(line_buffer))).get_path_until_cursor()
795 (1, len(line_buffer))).get_path_until_cursor()
796 path, dot, like = jedi.api.helpers.completion_parts(path)
796 path, dot, like = jedi.api.helpers.completion_parts(path)
797 if text.startswith('.'):
797 if text.startswith('.'):
798 # text will be `.` on completions like `a[0].<tab>`
798 # text will be `.` on completions like `a[0].<tab>`
799 before = dot
799 before = dot
800 else:
800 else:
801 before = line_buffer[:len(line_buffer) - len(like)]
801 before = line_buffer[:len(line_buffer) - len(like)]
802
802
803
803
804 def trim_start(completion):
804 def trim_start(completion):
805 """completions need to start with `text`, trim the beginning until it does"""
805 """completions need to start with `text`, trim the beginning until it does"""
806 if text in completion and not (completion.startswith(text)):
806 ltext = text.lower()
807 start_index = completion.index(text)
807 lcomp = completion.lower()
808 if ltext in lcomp and not (lcomp.startswith(ltext)):
809 start_index = lcomp.index(ltext)
808 if cursor_pos:
810 if cursor_pos:
809 assert start_index < cursor_pos
811 if start_index >= cursor_pos:
812 start_index = min(start_index, cursor_pos)
810 return completion[start_index:]
813 return completion[start_index:]
811 return completion
814 return completion
812
815
813 completions = interpreter.completions()
816 completions = interpreter.completions()
814
817
815 completion_text = [c.name_with_symbols for c in completions]
818 completion_text = [c.name_with_symbols for c in completions]
816
819
817 if self.omit__names:
820 if self.omit__names:
818 if self.omit__names == 1:
821 if self.omit__names == 1:
819 # true if txt is _not_ a __ name, false otherwise:
822 # true if txt is _not_ a __ name, false otherwise:
820 no__name = lambda txt: not txt.startswith('__')
823 no__name = lambda txt: not txt.startswith('__')
821 else:
824 else:
822 # true if txt is _not_ a _ name, false otherwise:
825 # true if txt is _not_ a _ name, false otherwise:
823 no__name = lambda txt: not txt.startswith('_')
826 no__name = lambda txt: not txt.startswith('_')
824 completion_text = filter(no__name, completion_text)
827 completion_text = filter(no__name, completion_text)
825
828
826
829
827 return [trim_start(before + c_text) for c_text in completion_text]
830 return [trim_start(before + c_text) for c_text in completion_text]
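The change to trim_start above compares lower-cased text so completions differing only in case are still trimmed, and it replaces the hard assert with a clamp. A standalone sketch of that logic (hypothetical helper, not part of the diff; the unconditional min() is equivalent to the guarded clamp in the new code):
def _trim_start_demo(completion, text, cursor_pos=None):
    # lower-case comparison, as in the new code
    ltext, lcomp = text.lower(), completion.lower()
    if ltext in lcomp and not lcomp.startswith(ltext):
        start_index = lcomp.index(ltext)
        if cursor_pos is not None:
            start_index = min(start_index, cursor_pos)
        return completion[start_index:]
    return completion

>>> _trim_start_demo('os.Path', 'path')
'Path'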
828
831
829
832
830 def python_matches(self, text):
833 def python_matches(self, text):
831 """Match attributes or global python names"""
834 """Match attributes or global python names"""
832 # Jedi completion
835 # Jedi completion
833
836
834 if "." in text:
837 if "." in text:
835 try:
838 try:
836 matches = self.attr_matches(text)
839 matches = self.attr_matches(text)
837 if text.endswith('.') and self.omit__names:
840 if text.endswith('.') and self.omit__names:
838 if self.omit__names == 1:
841 if self.omit__names == 1:
839 # true if txt is _not_ a __ name, false otherwise:
842 # true if txt is _not_ a __ name, false otherwise:
840 no__name = (lambda txt:
843 no__name = (lambda txt:
841 re.match(r'.*\.__.*?__',txt) is None)
844 re.match(r'.*\.__.*?__',txt) is None)
842 else:
845 else:
843 # true if txt is _not_ a _ name, false otherwise:
846 # true if txt is _not_ a _ name, false otherwise:
844 no__name = (lambda txt:
847 no__name = (lambda txt:
845 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
848 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
846 matches = filter(no__name, matches)
849 matches = filter(no__name, matches)
847 except NameError:
850 except NameError:
848 # catches <undefined attributes>.<tab>
851 # catches <undefined attributes>.<tab>
849 matches = []
852 matches = []
850 else:
853 else:
851 matches = self.global_matches(text)
854 matches = self.global_matches(text)
852 return matches
855 return matches
853
856
854 def _default_arguments_from_docstring(self, doc):
857 def _default_arguments_from_docstring(self, doc):
855 """Parse the first line of docstring for call signature.
858 """Parse the first line of docstring for call signature.
856
859
857 Docstring should be of the form 'min(iterable[, key=func])\n'.
860 Docstring should be of the form 'min(iterable[, key=func])\n'.
858 It can also parse cython docstring of the form
861 It can also parse cython docstring of the form
859 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
862 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
860 """
863 """
861 if doc is None:
864 if doc is None:
862 return []
865 return []
863
866
864 # only care about the first line
867 # only care about the first line
865 line = doc.lstrip().splitlines()[0]
868 line = doc.lstrip().splitlines()[0]
866
869
867 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
870 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
868 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
871 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
869 sig = self.docstring_sig_re.search(line)
872 sig = self.docstring_sig_re.search(line)
870 if sig is None:
873 if sig is None:
871 return []
874 return []
872 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
875 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
873 sig = sig.groups()[0].split(',')
876 sig = sig.groups()[0].split(',')
874 ret = []
877 ret = []
875 for s in sig:
878 for s in sig:
876 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
879 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
877 ret += self.docstring_kwd_re.findall(s)
880 ret += self.docstring_kwd_re.findall(s)
878 return ret
881 return ret
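Given the two regexes above, only parameters written with an '=' default are recovered; a hedged example ('completer' is a hypothetical IPCompleter instance):
>>> completer._default_arguments_from_docstring('min(iterable[, key=func])\n')
['key']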
879
882
880 def _default_arguments(self, obj):
883 def _default_arguments(self, obj):
881 """Return the list of default arguments of obj if it is callable,
884 """Return the list of default arguments of obj if it is callable,
882 or empty list otherwise."""
885 or empty list otherwise."""
883 call_obj = obj
886 call_obj = obj
884 ret = []
887 ret = []
885 if inspect.isbuiltin(obj):
888 if inspect.isbuiltin(obj):
886 pass
889 pass
887 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
890 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
888 if inspect.isclass(obj):
891 if inspect.isclass(obj):
889 # for cython embedsignature=True the constructor docstring
892 # for cython embedsignature=True the constructor docstring
890 # belongs to the object itself, not __init__
893 # belongs to the object itself, not __init__
891 ret += self._default_arguments_from_docstring(
894 ret += self._default_arguments_from_docstring(
892 getattr(obj, '__doc__', ''))
895 getattr(obj, '__doc__', ''))
893 # for classes, check for __init__,__new__
896 # for classes, check for __init__,__new__
894 call_obj = (getattr(obj, '__init__', None) or
897 call_obj = (getattr(obj, '__init__', None) or
895 getattr(obj, '__new__', None))
898 getattr(obj, '__new__', None))
896 # for all others, check if they are __call__able
899 # for all others, check if they are __call__able
897 elif hasattr(obj, '__call__'):
900 elif hasattr(obj, '__call__'):
898 call_obj = obj.__call__
901 call_obj = obj.__call__
899 ret += self._default_arguments_from_docstring(
902 ret += self._default_arguments_from_docstring(
900 getattr(call_obj, '__doc__', ''))
903 getattr(call_obj, '__doc__', ''))
901
904
902 if PY3:
905 if PY3:
903 _keeps = (inspect.Parameter.KEYWORD_ONLY,
906 _keeps = (inspect.Parameter.KEYWORD_ONLY,
904 inspect.Parameter.POSITIONAL_OR_KEYWORD)
907 inspect.Parameter.POSITIONAL_OR_KEYWORD)
905 signature = inspect.signature
908 signature = inspect.signature
906 else:
909 else:
907 import IPython.utils.signatures
910 import IPython.utils.signatures
908 _keeps = (IPython.utils.signatures.Parameter.KEYWORD_ONLY,
911 _keeps = (IPython.utils.signatures.Parameter.KEYWORD_ONLY,
909 IPython.utils.signatures.Parameter.POSITIONAL_OR_KEYWORD)
912 IPython.utils.signatures.Parameter.POSITIONAL_OR_KEYWORD)
910 signature = IPython.utils.signatures.signature
913 signature = IPython.utils.signatures.signature
911
914
912 try:
915 try:
913 sig = signature(call_obj)
916 sig = signature(call_obj)
914 ret.extend(k for k, v in sig.parameters.items() if
917 ret.extend(k for k, v in sig.parameters.items() if
915 v.kind in _keeps)
918 v.kind in _keeps)
916 except ValueError:
919 except ValueError:
917 pass
920 pass
918
921
919 return list(set(ret))
922 return list(set(ret))
920
923
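# A sketch of the signature-based half of _default_arguments on Python 3,
# applied to a throwaway function: only parameters that can be passed by
# keyword are kept.
import inspect

def example(a, b=1, *args, c=2, **kwargs):
    pass

_keeps = (inspect.Parameter.KEYWORD_ONLY,
          inspect.Parameter.POSITIONAL_OR_KEYWORD)
names = [k for k, v in inspect.signature(example).parameters.items()
         if v.kind in _keeps]
print(names)   # ['a', 'b', 'c'] -- *args and **kwargs are excluded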
921 def python_func_kw_matches(self,text):
924 def python_func_kw_matches(self,text):
922 """Match named parameters (kwargs) of the last open function"""
925 """Match named parameters (kwargs) of the last open function"""
923
926
924 if "." in text: # a parameter cannot be dotted
927 if "." in text: # a parameter cannot be dotted
925 return []
928 return []
926 try: regexp = self.__funcParamsRegex
929 try: regexp = self.__funcParamsRegex
927 except AttributeError:
930 except AttributeError:
928 regexp = self.__funcParamsRegex = re.compile(r'''
931 regexp = self.__funcParamsRegex = re.compile(r'''
929 '.*?(?<!\\)' | # single quoted strings or
932 '.*?(?<!\\)' | # single quoted strings or
930 ".*?(?<!\\)" | # double quoted strings or
933 ".*?(?<!\\)" | # double quoted strings or
931 \w+ | # identifier
934 \w+ | # identifier
932 \S # other characters
935 \S # other characters
933 ''', re.VERBOSE | re.DOTALL)
936 ''', re.VERBOSE | re.DOTALL)
934 # 1. find the nearest identifier that comes before an unclosed
937 # 1. find the nearest identifier that comes before an unclosed
935 # parenthesis before the cursor
938 # parenthesis before the cursor
936 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
939 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
937 tokens = regexp.findall(self.text_until_cursor)
940 tokens = regexp.findall(self.text_until_cursor)
938 tokens.reverse()
941 tokens.reverse()
939 iterTokens = iter(tokens); openPar = 0
942 iterTokens = iter(tokens); openPar = 0
940
943
941 for token in iterTokens:
944 for token in iterTokens:
942 if token == ')':
945 if token == ')':
943 openPar -= 1
946 openPar -= 1
944 elif token == '(':
947 elif token == '(':
945 openPar += 1
948 openPar += 1
946 if openPar > 0:
949 if openPar > 0:
947 # found the last unclosed parenthesis
950 # found the last unclosed parenthesis
948 break
951 break
949 else:
952 else:
950 return []
953 return []
951 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
954 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
952 ids = []
955 ids = []
953 isId = re.compile(r'\w+$').match
956 isId = re.compile(r'\w+$').match
954
957
955 while True:
958 while True:
956 try:
959 try:
957 ids.append(next(iterTokens))
960 ids.append(next(iterTokens))
958 if not isId(ids[-1]):
961 if not isId(ids[-1]):
959 ids.pop(); break
962 ids.pop(); break
960 if not next(iterTokens) == '.':
963 if not next(iterTokens) == '.':
961 break
964 break
962 except StopIteration:
965 except StopIteration:
963 break
966 break
964 # lookup the candidate callable matches either using global_matches
967 # lookup the candidate callable matches either using global_matches
965 # or attr_matches for dotted names
968 # or attr_matches for dotted names
966 if len(ids) == 1:
969 if len(ids) == 1:
967 callableMatches = self.global_matches(ids[0])
970 callableMatches = self.global_matches(ids[0])
968 else:
971 else:
969 callableMatches = self.attr_matches('.'.join(ids[::-1]))
972 callableMatches = self.attr_matches('.'.join(ids[::-1]))
970 argMatches = []
973 argMatches = []
971 for callableMatch in callableMatches:
974 for callableMatch in callableMatches:
972 try:
975 try:
973 namedArgs = self._default_arguments(eval(callableMatch,
976 namedArgs = self._default_arguments(eval(callableMatch,
974 self.namespace))
977 self.namespace))
975 except:
978 except:
976 continue
979 continue
977
980
978 for namedArg in namedArgs:
981 for namedArg in namedArgs:
979 if namedArg.startswith(text):
982 if namedArg.startswith(text):
980 argMatches.append(u"%s=" %namedArg)
983 argMatches.append(u"%s=" %namedArg)
981 return argMatches
984 return argMatches
982
985
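# A standalone sketch of the reversed-token scan above, which finds the
# callable owning the last unclosed parenthesis; the sample line is the one
# from the comment ("foo (1+bar(x), pa" with the cursor at the end).
import re

token_re = re.compile(r'''
    '.*?(?<!\\)' |   # single quoted strings or
    ".*?(?<!\\)" |   # double quoted strings or
    \w+ |            # identifier
    \S               # other characters
    ''', re.VERBOSE | re.DOTALL)

text_until_cursor = 'foo (1+bar(x), pa'
tokens = token_re.findall(text_until_cursor)
tokens.reverse()

open_par = 0
iter_tokens = iter(tokens)
for token in iter_tokens:
    if token == ')':
        open_par -= 1
    elif token == '(':
        open_par += 1
        if open_par > 0:          # reached the last unclosed '('
            break

print(next(iter_tokens))          # 'foo' -- the candidate callable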
983 def dict_key_matches(self, text):
986 def dict_key_matches(self, text):
984 "Match string keys in a dictionary, after e.g. 'foo[' "
987 "Match string keys in a dictionary, after e.g. 'foo[' "
985 def get_keys(obj):
988 def get_keys(obj):
986 # Objects can define their own completions by defining an
989 # Objects can define their own completions by defining an
987 # _ipython_key_completions_() method.
990 # _ipython_key_completions_() method.
988 method = get_real_method(obj, '_ipython_key_completions_')
991 method = get_real_method(obj, '_ipython_key_completions_')
989 if method is not None:
992 if method is not None:
990 return method()
993 return method()
991
994
992 # Special case some common in-memory dict-like types
995 # Special case some common in-memory dict-like types
993 if isinstance(obj, dict) or\
996 if isinstance(obj, dict) or\
994 _safe_isinstance(obj, 'pandas', 'DataFrame'):
997 _safe_isinstance(obj, 'pandas', 'DataFrame'):
995 try:
998 try:
996 return list(obj.keys())
999 return list(obj.keys())
997 except Exception:
1000 except Exception:
998 return []
1001 return []
999 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
1002 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
1000 _safe_isinstance(obj, 'numpy', 'void'):
1003 _safe_isinstance(obj, 'numpy', 'void'):
1001 return obj.dtype.names or []
1004 return obj.dtype.names or []
1002 return []
1005 return []
1003
1006
1004 try:
1007 try:
1005 regexps = self.__dict_key_regexps
1008 regexps = self.__dict_key_regexps
1006 except AttributeError:
1009 except AttributeError:
1007 dict_key_re_fmt = r'''(?x)
1010 dict_key_re_fmt = r'''(?x)
1008 ( # match dict-referring expression wrt greedy setting
1011 ( # match dict-referring expression wrt greedy setting
1009 %s
1012 %s
1010 )
1013 )
1011 \[ # open bracket
1014 \[ # open bracket
1012 \s* # and optional whitespace
1015 \s* # and optional whitespace
1013 ([uUbB]? # string prefix (r not handled)
1016 ([uUbB]? # string prefix (r not handled)
1014 (?: # unclosed string
1017 (?: # unclosed string
1015 '(?:[^']|(?<!\\)\\')*
1018 '(?:[^']|(?<!\\)\\')*
1016 |
1019 |
1017 "(?:[^"]|(?<!\\)\\")*
1020 "(?:[^"]|(?<!\\)\\")*
1018 )
1021 )
1019 )?
1022 )?
1020 $
1023 $
1021 '''
1024 '''
1022 regexps = self.__dict_key_regexps = {
1025 regexps = self.__dict_key_regexps = {
1023 False: re.compile(dict_key_re_fmt % '''
1026 False: re.compile(dict_key_re_fmt % '''
1024 # identifiers separated by .
1027 # identifiers separated by .
1025 (?!\d)\w+
1028 (?!\d)\w+
1026 (?:\.(?!\d)\w+)*
1029 (?:\.(?!\d)\w+)*
1027 '''),
1030 '''),
1028 True: re.compile(dict_key_re_fmt % '''
1031 True: re.compile(dict_key_re_fmt % '''
1029 .+
1032 .+
1030 ''')
1033 ''')
1031 }
1034 }
1032
1035
1033 match = regexps[self.greedy].search(self.text_until_cursor)
1036 match = regexps[self.greedy].search(self.text_until_cursor)
1034 if match is None:
1037 if match is None:
1035 return []
1038 return []
1036
1039
1037 expr, prefix = match.groups()
1040 expr, prefix = match.groups()
1038 try:
1041 try:
1039 obj = eval(expr, self.namespace)
1042 obj = eval(expr, self.namespace)
1040 except Exception:
1043 except Exception:
1041 try:
1044 try:
1042 obj = eval(expr, self.global_namespace)
1045 obj = eval(expr, self.global_namespace)
1043 except Exception:
1046 except Exception:
1044 return []
1047 return []
1045
1048
1046 keys = get_keys(obj)
1049 keys = get_keys(obj)
1047 if not keys:
1050 if not keys:
1048 return keys
1051 return keys
1049 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
1052 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
1050 if not matches:
1053 if not matches:
1051 return matches
1054 return matches
1052
1055
1053 # find the positions (within the line) of:
1056 # find the positions (within the line) of:
1054 # - the start of the text being completed
1057 # - the start of the text being completed
1055 # - the start of the key text
1058 # - the start of the key text
1056 # - the start of the completion
1059 # - the start of the completion
1057 text_start = len(self.text_until_cursor) - len(text)
1060 text_start = len(self.text_until_cursor) - len(text)
1058 if prefix:
1061 if prefix:
1059 key_start = match.start(2)
1062 key_start = match.start(2)
1060 completion_start = key_start + token_offset
1063 completion_start = key_start + token_offset
1061 else:
1064 else:
1062 key_start = completion_start = match.end()
1065 key_start = completion_start = match.end()
1063
1066
1064 # grab the leading prefix, to make sure all completions start with `text`
1067 # grab the leading prefix, to make sure all completions start with `text`
1065 if text_start > key_start:
1068 if text_start > key_start:
1066 leading = ''
1069 leading = ''
1067 else:
1070 else:
1068 leading = text[text_start:completion_start]
1071 leading = text[text_start:completion_start]
1069
1072
1070 # the index of the `[` character
1073 # the index of the `[` character
1071 bracket_idx = match.end(1)
1074 bracket_idx = match.end(1)
1072
1075
1073 # append closing quote and bracket as appropriate
1076 # append closing quote and bracket as appropriate
1074 # this is *not* appropriate if the opening quote or bracket is outside
1077 # this is *not* appropriate if the opening quote or bracket is outside
1075 # the text given to this method
1078 # the text given to this method
1076 suf = ''
1079 suf = ''
1077 continuation = self.line_buffer[len(self.text_until_cursor):]
1080 continuation = self.line_buffer[len(self.text_until_cursor):]
1078 if key_start > text_start and closing_quote:
1081 if key_start > text_start and closing_quote:
1079 # quotes were opened inside text, maybe close them
1082 # quotes were opened inside text, maybe close them
1080 if continuation.startswith(closing_quote):
1083 if continuation.startswith(closing_quote):
1081 continuation = continuation[len(closing_quote):]
1084 continuation = continuation[len(closing_quote):]
1082 else:
1085 else:
1083 suf += closing_quote
1086 suf += closing_quote
1084 if bracket_idx > text_start:
1087 if bracket_idx > text_start:
1085 # brackets were opened inside text, maybe close them
1088 # brackets were opened inside text, maybe close them
1086 if not continuation.startswith(']'):
1089 if not continuation.startswith(']'):
1087 suf += ']'
1090 suf += ']'
1088
1091
1089 return [leading + k + suf for k in matches]
1092 return [leading + k + suf for k in matches]
1090
1093
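# A toy container illustrating the first branch of get_keys() above: any
# object may define _ipython_key_completions_ to advertise its own keys for
# the ``obj[<TAB>`` form; the class and key names here are invented.
class ColumnStore(object):
    def __init__(self, columns):
        self._columns = list(columns)

    def __getitem__(self, name):
        return self._columns.index(name)

    def _ipython_key_completions_(self):
        return self._columns

store = ColumnStore(['alpha', 'beta', 'gamma'])
print(store._ipython_key_completions_())   # ['alpha', 'beta', 'gamma']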
1091 def unicode_name_matches(self, text):
1094 def unicode_name_matches(self, text):
1092 u"""Match Latex-like syntax for unicode characters based
1095 u"""Match Latex-like syntax for unicode characters based
1093 on the name of the character.
1096 on the name of the character.
1094
1097
1095 This does \\GREEK SMALL LETTER ETA -> η
1098 This does \\GREEK SMALL LETTER ETA -> η
1096
1099
1097 Works only on valid python 3 identifiers, or on combining characters that
1100 Works only on valid python 3 identifiers, or on combining characters that
1098 will combine to form a valid identifier.
1101 will combine to form a valid identifier.
1099
1102
1100 Used on Python 3 only.
1103 Used on Python 3 only.
1101 """
1104 """
1102 slashpos = text.rfind('\\')
1105 slashpos = text.rfind('\\')
1103 if slashpos > -1:
1106 if slashpos > -1:
1104 s = text[slashpos+1:]
1107 s = text[slashpos+1:]
1105 try :
1108 try :
1106 unic = unicodedata.lookup(s)
1109 unic = unicodedata.lookup(s)
1107 # allow combining chars
1110 # allow combining chars
1108 if ('a'+unic).isidentifier():
1111 if ('a'+unic).isidentifier():
1109 return '\\'+s,[unic]
1112 return '\\'+s,[unic]
1110 except KeyError as e:
1113 except KeyError as e:
1111 pass
1114 pass
1112 return u'', []
1115 return u'', []
1113
1116
1114
1117
1115
1118
1116
1119
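# The stdlib lookup backing unicode_name_matches, plus the identifier check
# used to filter its result (a minimal sketch of the happy path only).
import unicodedata

char = unicodedata.lookup('GREEK SMALL LETTER ETA')
print(char)                          # η
print(('a' + char).isidentifier())   # True, so the completion is offered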
1117 def latex_matches(self, text):
1120 def latex_matches(self, text):
1118 u"""Match Latex syntax for unicode characters.
1121 u"""Match Latex syntax for unicode characters.
1119
1122
1120 This does both \\alp -> \\alpha and \\alpha -> α
1123 This does both \\alp -> \\alpha and \\alpha -> α
1121
1124
1122 Used on Python 3 only.
1125 Used on Python 3 only.
1123 """
1126 """
1124 slashpos = text.rfind('\\')
1127 slashpos = text.rfind('\\')
1125 if slashpos > -1:
1128 if slashpos > -1:
1126 s = text[slashpos:]
1129 s = text[slashpos:]
1127 if s in latex_symbols:
1130 if s in latex_symbols:
1128 # Try to complete a full latex symbol to unicode
1131 # Try to complete a full latex symbol to unicode
1129 # \\alpha -> α
1132 # \\alpha -> α
1130 return s, [latex_symbols[s]]
1133 return s, [latex_symbols[s]]
1131 else:
1134 else:
1132 # If a user has partially typed a latex symbol, give them
1135 # If a user has partially typed a latex symbol, give them
1133 # a full list of options \al -> [\aleph, \alpha]
1136 # a full list of options \al -> [\aleph, \alpha]
1134 matches = [k for k in latex_symbols if k.startswith(s)]
1137 matches = [k for k in latex_symbols if k.startswith(s)]
1135 return s, matches
1138 return s, matches
1136 return u'', []
1139 return u'', []
1137
1140
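# A sketch of the two branches of latex_matches, using the same
# latex_symbols table the test suite below imports (requires IPython).
from IPython.core.latex_symbols import latex_symbols

# full symbol -> its unicode character
print(latex_symbols['\\alpha'])                            # α
# partial symbol -> every symbol sharing the prefix
print([k for k in latex_symbols if k.startswith('\\al')])  # includes \aleph, \alpha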
1138 def dispatch_custom_completer(self, text):
1141 def dispatch_custom_completer(self, text):
1139 line = self.line_buffer
1142 line = self.line_buffer
1140 if not line.strip():
1143 if not line.strip():
1141 return None
1144 return None
1142
1145
1143 # Create a little structure to pass all the relevant information about
1146 # Create a little structure to pass all the relevant information about
1144 # the current completion to any custom completer.
1147 # the current completion to any custom completer.
1145 event = Bunch()
1148 event = Bunch()
1146 event.line = line
1149 event.line = line
1147 event.symbol = text
1150 event.symbol = text
1148 cmd = line.split(None,1)[0]
1151 cmd = line.split(None,1)[0]
1149 event.command = cmd
1152 event.command = cmd
1150 event.text_until_cursor = self.text_until_cursor
1153 event.text_until_cursor = self.text_until_cursor
1151
1154
1152 # for foo etc., also try to find a completer for %foo
1155 # for foo etc., also try to find a completer for %foo
1153 if not cmd.startswith(self.magic_escape):
1156 if not cmd.startswith(self.magic_escape):
1154 try_magic = self.custom_completers.s_matches(
1157 try_magic = self.custom_completers.s_matches(
1155 self.magic_escape + cmd)
1158 self.magic_escape + cmd)
1156 else:
1159 else:
1157 try_magic = []
1160 try_magic = []
1158
1161
1159 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1162 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1160 try_magic,
1163 try_magic,
1161 self.custom_completers.flat_matches(self.text_until_cursor)):
1164 self.custom_completers.flat_matches(self.text_until_cursor)):
1162 try:
1165 try:
1163 res = c(event)
1166 res = c(event)
1164 if res:
1167 if res:
1165 # first, try case sensitive match
1168 # first, try case sensitive match
1166 withcase = [cast_unicode_py2(r) for r in res if r.startswith(text)]
1169 withcase = [cast_unicode_py2(r) for r in res if r.startswith(text)]
1167 if withcase:
1170 if withcase:
1168 return withcase
1171 return withcase
1169 # if none, then case insensitive ones are ok too
1172 # if none, then case insensitive ones are ok too
1170 text_low = text.lower()
1173 text_low = text.lower()
1171 return [cast_unicode_py2(r) for r in res if r.lower().startswith(text_low)]
1174 return [cast_unicode_py2(r) for r in res if r.lower().startswith(text_low)]
1172 except TryNext:
1175 except TryNext:
1173 pass
1176 pass
1174
1177
1175 return None
1178 return None
1176
1179
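# A sketch of the callback contract used by dispatch_custom_completer above:
# the completer is called as c(event), where event carries the four fields
# set in that method; raising TryNext defers to the next completer. The
# command name and directory names below are invented, and registration of
# the callback (IPython's complete_command hook) is not shown here.
from IPython.core.error import TryNext

def my_cd_completer(event):
    # event.command           -> first word of the line, e.g. '%cd'
    # event.symbol            -> fragment currently being completed
    # event.line              -> the whole line buffer
    # event.text_until_cursor -> the line up to the cursor
    if not event.command.endswith('cd'):
        raise TryNext                      # let other completers handle it
    return ['project_a/', 'project_b/']    # hypothetical matches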
1177 def complete(self, text=None, line_buffer=None, cursor_pos=None):
1180 def complete(self, text=None, line_buffer=None, cursor_pos=None):
1178 """Find completions for the given text and line context.
1181 """Find completions for the given text and line context.
1179
1182
1180 Note that both the text and the line_buffer are optional, but at least
1183 Note that both the text and the line_buffer are optional, but at least
1181 one of them must be given.
1184 one of them must be given.
1182
1185
1183 Parameters
1186 Parameters
1184 ----------
1187 ----------
1185 text : string, optional
1188 text : string, optional
1186 Text to perform the completion on. If not given, the line buffer
1189 Text to perform the completion on. If not given, the line buffer
1187 is split using the instance's CompletionSplitter object.
1190 is split using the instance's CompletionSplitter object.
1188
1191
1189 line_buffer : string, optional
1192 line_buffer : string, optional
1190 If not given, the completer attempts to obtain the current line
1193 If not given, the completer attempts to obtain the current line
1191 buffer via readline. This keyword allows clients which are
1194 buffer via readline. This keyword allows clients which are
1192 requesting text completions in non-readline contexts to inform
1195 requesting text completions in non-readline contexts to inform
1193 the completer of the entire text.
1196 the completer of the entire text.
1194
1197
1195 cursor_pos : int, optional
1198 cursor_pos : int, optional
1196 Index of the cursor in the full line buffer. Should be provided by
1199 Index of the cursor in the full line buffer. Should be provided by
1197 remote frontends where the kernel has no access to frontend state.
1200 remote frontends where the kernel has no access to frontend state.
1198
1201
1199 Returns
1202 Returns
1200 -------
1203 -------
1201 text : str
1204 text : str
1202 Text that was actually used in the completion.
1205 Text that was actually used in the completion.
1203
1206
1204 matches : list
1207 matches : list
1205 A list of completion matches.
1208 A list of completion matches.
1206 """
1209 """
1207 # if the cursor position isn't given, the only sane assumption we can
1210 # if the cursor position isn't given, the only sane assumption we can
1208 # make is that it's at the end of the line (the common case)
1211 # make is that it's at the end of the line (the common case)
1209 if cursor_pos is None:
1212 if cursor_pos is None:
1210 cursor_pos = len(line_buffer) if text is None else len(text)
1213 cursor_pos = len(line_buffer) if text is None else len(text)
1211
1214
1212 if PY3:
1215 if PY3:
1213
1216
1214 base_text = text if not line_buffer else line_buffer[:cursor_pos]
1217 base_text = text if not line_buffer else line_buffer[:cursor_pos]
1215 latex_text, latex_matches = self.latex_matches(base_text)
1218 latex_text, latex_matches = self.latex_matches(base_text)
1216 if latex_matches:
1219 if latex_matches:
1217 return latex_text, latex_matches
1220 return latex_text, latex_matches
1218 name_text = ''
1221 name_text = ''
1219 name_matches = []
1222 name_matches = []
1220 for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
1223 for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
1221 name_text, name_matches = meth(base_text)
1224 name_text, name_matches = meth(base_text)
1222 if name_text:
1225 if name_text:
1223 return name_text, name_matches
1226 return name_text, name_matches
1224
1227
1225 # if text is either None or an empty string, rely on the line buffer
1228 # if text is either None or an empty string, rely on the line buffer
1226 if not text:
1229 if not text:
1227 text = self.splitter.split_line(line_buffer, cursor_pos)
1230 text = self.splitter.split_line(line_buffer, cursor_pos)
1228
1231
1229 # If no line buffer is given, assume the input text is all there was
1232 # If no line buffer is given, assume the input text is all there was
1230 if line_buffer is None:
1233 if line_buffer is None:
1231 line_buffer = text
1234 line_buffer = text
1232
1235
1233 self.line_buffer = line_buffer
1236 self.line_buffer = line_buffer
1234 self.text_until_cursor = self.line_buffer[:cursor_pos]
1237 self.text_until_cursor = self.line_buffer[:cursor_pos]
1235
1238
1236 # Start with a clean slate of completions
1239 # Start with a clean slate of completions
1237 self.matches[:] = []
1240 self.matches[:] = []
1238 custom_res = self.dispatch_custom_completer(text)
1241 custom_res = self.dispatch_custom_completer(text)
1239 if custom_res is not None:
1242 if custom_res is not None:
1240 # did custom completers produce something?
1243 # did custom completers produce something?
1241 self.matches = custom_res
1244 self.matches = custom_res
1242 else:
1245 else:
1243 # Extend the list of completions with the results of each
1246 # Extend the list of completions with the results of each
1244 # matcher, so we return results to the user from all
1247 # matcher, so we return results to the user from all
1245 # namespaces.
1248 # namespaces.
1246 if self.merge_completions:
1249 if self.merge_completions:
1247 self.matches = []
1250 self.matches = []
1248 for matcher in self.matchers:
1251 for matcher in self.matchers:
1249 try:
1252 try:
1250 self.matches.extend(matcher(text))
1253 self.matches.extend(matcher(text))
1251 except:
1254 except:
1252 # Show the ugly traceback if the matcher causes an
1255 # Show the ugly traceback if the matcher causes an
1253 # exception, but do NOT crash the kernel!
1256 # exception, but do NOT crash the kernel!
1254 sys.excepthook(*sys.exc_info())
1257 sys.excepthook(*sys.exc_info())
1255 else:
1258 else:
1256 for matcher in self.matchers:
1259 for matcher in self.matchers:
1257 self.matches = matcher(text)
1260 self.matches = matcher(text)
1258 if self.matches:
1261 if self.matches:
1259 break
1262 break
1260 # FIXME: we should extend our api to return a dict with completions for
1263 # FIXME: we should extend our api to return a dict with completions for
1261 # different types of objects. The rlcomplete() method could then
1264 # different types of objects. The rlcomplete() method could then
1262 # simply collapse the dict into a list for readline, but we'd have
1265 # simply collapse the dict into a list for readline, but we'd have
1263 # richer completion semantics in other environments.
1266 # richer completion semantics in other environments.
1264 if self.use_jedi_completions:
1267 if self.use_jedi_completions:
1265 self.matches.extend(self.python_jedi_matches(text, line_buffer, cursor_pos))
1268 self.matches.extend(self.python_jedi_matches(text, line_buffer, cursor_pos))
1266
1269
1267 self.matches = sorted(set(self.matches), key=completions_sorting_key)
1270 self.matches = sorted(set(self.matches), key=completions_sorting_key)
1268
1271
1269 return text, self.matches
1272 return text, self.matches
1270
1273
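# A usage sketch of complete(), mirroring how the test suite below drives it;
# this only works inside a running IPython session (get_ipython() is None
# otherwise), and 'myfunc' is a throwaway definition.
from IPython import get_ipython

ip = get_ipython()
ip.ex('def myfunc(a=1, b=2): return a + b')

# explicit text fragment
text, matches = ip.Completer.complete('myf')
# line buffer only: the fragment is derived by the CompletionSplitter
text, matches = ip.Completer.complete(None, 'myfunc(1,b')
print(text, matches)   # the keyword completion 'b=' should be among matches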
1271 def rlcomplete(self, text, state):
1274 def rlcomplete(self, text, state):
1272 """Return the state-th possible completion for 'text'.
1275 """Return the state-th possible completion for 'text'.
1273
1276
1274 This is called successively with state == 0, 1, 2, ... until it
1277 This is called successively with state == 0, 1, 2, ... until it
1275 returns None. The completion should begin with 'text'.
1278 returns None. The completion should begin with 'text'.
1276
1279
1277 Parameters
1280 Parameters
1278 ----------
1281 ----------
1279 text : string
1282 text : string
1280 Text to perform the completion on.
1283 Text to perform the completion on.
1281
1284
1282 state : int
1285 state : int
1283 Counter used by readline.
1286 Counter used by readline.
1284 """
1287 """
1285 if state==0:
1288 if state==0:
1286
1289
1287 self.line_buffer = line_buffer = self.readline.get_line_buffer()
1290 self.line_buffer = line_buffer = self.readline.get_line_buffer()
1288 cursor_pos = self.readline.get_endidx()
1291 cursor_pos = self.readline.get_endidx()
1289
1292
1290 #io.rprint("\nRLCOMPLETE: %r %r %r" %
1293 #io.rprint("\nRLCOMPLETE: %r %r %r" %
1291 # (text, line_buffer, cursor_pos) ) # dbg
1294 # (text, line_buffer, cursor_pos) ) # dbg
1292
1295
1293 # if there is only a tab on a line with only whitespace, instead of
1296 # if there is only a tab on a line with only whitespace, instead of
1294 # the mostly useless 'do you want to see all million completions'
1297 # the mostly useless 'do you want to see all million completions'
1295 # message, just do the right thing and give the user his tab!
1298 # message, just do the right thing and give the user his tab!
1296 # Incidentally, this enables pasting of tabbed text from an editor
1299 # Incidentally, this enables pasting of tabbed text from an editor
1297 # (as long as autoindent is off).
1300 # (as long as autoindent is off).
1298
1301
1299 # It should be noted that at least pyreadline still shows file
1302 # It should be noted that at least pyreadline still shows file
1300 # completions - is there a way around it?
1303 # completions - is there a way around it?
1301
1304
1302 # don't apply this on 'dumb' terminals, such as emacs buffers, so
1305 # don't apply this on 'dumb' terminals, such as emacs buffers, so
1303 # we don't interfere with their own tab-completion mechanism.
1306 # we don't interfere with their own tab-completion mechanism.
1304 if not (self.dumb_terminal or line_buffer.strip()):
1307 if not (self.dumb_terminal or line_buffer.strip()):
1305 self.readline.insert_text('\t')
1308 self.readline.insert_text('\t')
1306 sys.stdout.flush()
1309 sys.stdout.flush()
1307 return None
1310 return None
1308
1311
1309 # Note: debugging exceptions that may occur in completion is very
1312 # Note: debugging exceptions that may occur in completion is very
1310 # tricky, because readline unconditionally silences them. So if
1313 # tricky, because readline unconditionally silences them. So if
1311 # during development you suspect a bug in the completion code, turn
1314 # during development you suspect a bug in the completion code, turn
1312 # this flag on temporarily by uncommenting the second form (don't
1315 # this flag on temporarily by uncommenting the second form (don't
1313 # flip the value in the first line, as the '# dbg' marker can be
1316 # flip the value in the first line, as the '# dbg' marker can be
1314 # automatically detected and is used elsewhere).
1317 # automatically detected and is used elsewhere).
1315 DEBUG = False
1318 DEBUG = False
1316 #DEBUG = True # dbg
1319 #DEBUG = True # dbg
1317 if DEBUG:
1320 if DEBUG:
1318 try:
1321 try:
1319 self.complete(text, line_buffer, cursor_pos)
1322 self.complete(text, line_buffer, cursor_pos)
1320 except:
1323 except:
1321 import traceback; traceback.print_exc()
1324 import traceback; traceback.print_exc()
1322 else:
1325 else:
1323 # The normal production version is here
1326 # The normal production version is here
1324
1327
1325 # This method computes the self.matches array
1328 # This method computes the self.matches array
1326 self.complete(text, line_buffer, cursor_pos)
1329 self.complete(text, line_buffer, cursor_pos)
1327
1330
1328 try:
1331 try:
1329 return self.matches[state]
1332 return self.matches[state]
1330 except IndexError:
1333 except IndexError:
1331 return None
1334 return None
1332
1335
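# Readline drives rlcomplete() with state = 0, 1, 2, ... and stops at the
# first None, so collecting every match for a fragment looks roughly like
# this ('completer' stands for a readline-enabled IPCompleter instance).
def collect_matches(completer, text):
    matches = []
    state = 0
    while True:
        m = completer.rlcomplete(text, state)
        if m is None:
            return matches
        matches.append(m)
        state += 1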
@@ -1,805 +1,805 b''
1 # encoding: utf-8
1 # encoding: utf-8
2 """Tests for the IPython tab-completion machinery."""
2 """Tests for the IPython tab-completion machinery."""
3
3
4 # Copyright (c) IPython Development Team.
4 # Copyright (c) IPython Development Team.
5 # Distributed under the terms of the Modified BSD License.
5 # Distributed under the terms of the Modified BSD License.
6
6
7 import os
7 import os
8 import sys
8 import sys
9 import unittest
9 import unittest
10
10
11 from contextlib import contextmanager
11 from contextlib import contextmanager
12
12
13 import nose.tools as nt
13 import nose.tools as nt
14
14
15 from traitlets.config.loader import Config
15 from traitlets.config.loader import Config
16 from IPython import get_ipython
16 from IPython import get_ipython
17 from IPython.core import completer
17 from IPython.core import completer
18 from IPython.external.decorators import knownfailureif
18 from IPython.external.decorators import knownfailureif
19 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
19 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
20 from IPython.utils.generics import complete_object
20 from IPython.utils.generics import complete_object
21 from IPython.utils.py3compat import string_types, unicode_type
21 from IPython.utils.py3compat import string_types, unicode_type
22 from IPython.testing import decorators as dec
22 from IPython.testing import decorators as dec
23
23
24 #-----------------------------------------------------------------------------
24 #-----------------------------------------------------------------------------
25 # Test functions
25 # Test functions
26 #-----------------------------------------------------------------------------
26 #-----------------------------------------------------------------------------
27
27
28 @contextmanager
28 @contextmanager
29 def greedy_completion():
29 def greedy_completion():
30 ip = get_ipython()
30 ip = get_ipython()
31 greedy_original = ip.Completer.greedy
31 greedy_original = ip.Completer.greedy
32 try:
32 try:
33 ip.Completer.greedy = True
33 ip.Completer.greedy = True
34 yield
34 yield
35 finally:
35 finally:
36 ip.Completer.greedy = greedy_original
36 ip.Completer.greedy = greedy_original
37
37
38 def test_protect_filename():
38 def test_protect_filename():
39 pairs = [ ('abc','abc'),
39 pairs = [ ('abc','abc'),
40 (' abc',r'\ abc'),
40 (' abc',r'\ abc'),
41 ('a bc',r'a\ bc'),
41 ('a bc',r'a\ bc'),
42 ('a bc',r'a\ \ bc'),
42 ('a bc',r'a\ \ bc'),
43 (' bc',r'\ \ bc'),
43 (' bc',r'\ \ bc'),
44 ]
44 ]
45 # On posix, we also protect parens and other special characters
45 # On posix, we also protect parens and other special characters
46 if sys.platform != 'win32':
46 if sys.platform != 'win32':
47 pairs.extend( [('a(bc',r'a\(bc'),
47 pairs.extend( [('a(bc',r'a\(bc'),
48 ('a)bc',r'a\)bc'),
48 ('a)bc',r'a\)bc'),
49 ('a( )bc',r'a\(\ \)bc'),
49 ('a( )bc',r'a\(\ \)bc'),
50 ('a[1]bc', r'a\[1\]bc'),
50 ('a[1]bc', r'a\[1\]bc'),
51 ('a{1}bc', r'a\{1\}bc'),
51 ('a{1}bc', r'a\{1\}bc'),
52 ('a#bc', r'a\#bc'),
52 ('a#bc', r'a\#bc'),
53 ('a?bc', r'a\?bc'),
53 ('a?bc', r'a\?bc'),
54 ('a=bc', r'a\=bc'),
54 ('a=bc', r'a\=bc'),
55 ('a\\bc', r'a\\bc'),
55 ('a\\bc', r'a\\bc'),
56 ('a|bc', r'a\|bc'),
56 ('a|bc', r'a\|bc'),
57 ('a;bc', r'a\;bc'),
57 ('a;bc', r'a\;bc'),
58 ('a:bc', r'a\:bc'),
58 ('a:bc', r'a\:bc'),
59 ("a'bc", r"a\'bc"),
59 ("a'bc", r"a\'bc"),
60 ('a*bc', r'a\*bc'),
60 ('a*bc', r'a\*bc'),
61 ('a"bc', r'a\"bc'),
61 ('a"bc', r'a\"bc'),
62 ('a^bc', r'a\^bc'),
62 ('a^bc', r'a\^bc'),
63 ('a&bc', r'a\&bc'),
63 ('a&bc', r'a\&bc'),
64 ] )
64 ] )
65 # run the actual tests
65 # run the actual tests
66 for s1, s2 in pairs:
66 for s1, s2 in pairs:
67 s1p = completer.protect_filename(s1)
67 s1p = completer.protect_filename(s1)
68 nt.assert_equal(s1p, s2)
68 nt.assert_equal(s1p, s2)
69
69
70
70
71 def check_line_split(splitter, test_specs):
71 def check_line_split(splitter, test_specs):
72 for part1, part2, split in test_specs:
72 for part1, part2, split in test_specs:
73 cursor_pos = len(part1)
73 cursor_pos = len(part1)
74 line = part1+part2
74 line = part1+part2
75 out = splitter.split_line(line, cursor_pos)
75 out = splitter.split_line(line, cursor_pos)
76 nt.assert_equal(out, split)
76 nt.assert_equal(out, split)
77
77
78
78
79 def test_line_split():
79 def test_line_split():
80 """Basic line splitter test with default specs."""
80 """Basic line splitter test with default specs."""
81 sp = completer.CompletionSplitter()
81 sp = completer.CompletionSplitter()
82 # The format of the test specs is: part1, part2, expected answer. Parts 1
82 # The format of the test specs is: part1, part2, expected answer. Parts 1
83 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
83 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
84 # was at the end of part1. So an empty part2 represents someone hitting
84 # was at the end of part1. So an empty part2 represents someone hitting
85 # tab at the end of the line, the most common case.
85 # tab at the end of the line, the most common case.
86 t = [('run some/scrip', '', 'some/scrip'),
86 t = [('run some/scrip', '', 'some/scrip'),
87 ('run scripts/er', 'ror.py foo', 'scripts/er'),
87 ('run scripts/er', 'ror.py foo', 'scripts/er'),
88 ('echo $HOM', '', 'HOM'),
88 ('echo $HOM', '', 'HOM'),
89 ('print sys.pa', '', 'sys.pa'),
89 ('print sys.pa', '', 'sys.pa'),
90 ('print(sys.pa', '', 'sys.pa'),
90 ('print(sys.pa', '', 'sys.pa'),
91 ("execfile('scripts/er", '', 'scripts/er'),
91 ("execfile('scripts/er", '', 'scripts/er'),
92 ('a[x.', '', 'x.'),
92 ('a[x.', '', 'x.'),
93 ('a[x.', 'y', 'x.'),
93 ('a[x.', 'y', 'x.'),
94 ('cd "some_file/', '', 'some_file/'),
94 ('cd "some_file/', '', 'some_file/'),
95 ]
95 ]
96 check_line_split(sp, t)
96 check_line_split(sp, t)
97 # Ensure splitting works OK with unicode by re-running the tests with
97 # Ensure splitting works OK with unicode by re-running the tests with
98 # all inputs turned into unicode
98 # all inputs turned into unicode
99 check_line_split(sp, [ map(unicode_type, p) for p in t] )
99 check_line_split(sp, [ map(unicode_type, p) for p in t] )
100
100
101
101
102 def test_custom_completion_error():
102 def test_custom_completion_error():
103 """Test that errors from custom attribute completers are silenced."""
103 """Test that errors from custom attribute completers are silenced."""
104 ip = get_ipython()
104 ip = get_ipython()
105 class A(object): pass
105 class A(object): pass
106 ip.user_ns['a'] = A()
106 ip.user_ns['a'] = A()
107
107
108 @complete_object.when_type(A)
108 @complete_object.when_type(A)
109 def complete_A(a, existing_completions):
109 def complete_A(a, existing_completions):
110 raise TypeError("this should be silenced")
110 raise TypeError("this should be silenced")
111
111
112 ip.complete("a.")
112 ip.complete("a.")
113
113
114
114
115 def test_unicode_completions():
115 def test_unicode_completions():
116 ip = get_ipython()
116 ip = get_ipython()
117 # Some strings that trigger different types of completion. Check them both
117 # Some strings that trigger different types of completion. Check them both
118 # in str and unicode forms
118 # in str and unicode forms
119 s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
119 s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
120 for t in s + list(map(unicode_type, s)):
120 for t in s + list(map(unicode_type, s)):
121 # We don't need to check exact completion values (they may change
121 # We don't need to check exact completion values (they may change
122 # depending on the state of the namespace), but at least no exceptions
122 # depending on the state of the namespace), but at least no exceptions
123 # should be thrown and the return value should be a pair of text, list
123 # should be thrown and the return value should be a pair of text, list
124 # values.
124 # values.
125 text, matches = ip.complete(t)
125 text, matches = ip.complete(t)
126 nt.assert_true(isinstance(text, string_types))
126 nt.assert_true(isinstance(text, string_types))
127 nt.assert_true(isinstance(matches, list))
127 nt.assert_true(isinstance(matches, list))
128
128
129 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
129 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
130 def test_latex_completions():
130 def test_latex_completions():
131 from IPython.core.latex_symbols import latex_symbols
131 from IPython.core.latex_symbols import latex_symbols
132 import random
132 import random
133 ip = get_ipython()
133 ip = get_ipython()
134 # Test some random unicode symbols
134 # Test some random unicode symbols
135 keys = random.sample(latex_symbols.keys(), 10)
135 keys = random.sample(latex_symbols.keys(), 10)
136 for k in keys:
136 for k in keys:
137 text, matches = ip.complete(k)
137 text, matches = ip.complete(k)
138 nt.assert_equal(len(matches),1)
138 nt.assert_equal(len(matches),1)
139 nt.assert_equal(text, k)
139 nt.assert_equal(text, k)
140 nt.assert_equal(matches[0], latex_symbols[k])
140 nt.assert_equal(matches[0], latex_symbols[k])
141 # Test a more complex line
141 # Test a more complex line
142 text, matches = ip.complete(u'print(\\alpha')
142 text, matches = ip.complete(u'print(\\alpha')
143 nt.assert_equals(text, u'\\alpha')
143 nt.assert_equals(text, u'\\alpha')
144 nt.assert_equals(matches[0], latex_symbols['\\alpha'])
144 nt.assert_equals(matches[0], latex_symbols['\\alpha'])
145 # Test multiple matching latex symbols
145 # Test multiple matching latex symbols
146 text, matches = ip.complete(u'\\al')
146 text, matches = ip.complete(u'\\al')
147 nt.assert_in('\\alpha', matches)
147 nt.assert_in('\\alpha', matches)
148 nt.assert_in('\\aleph', matches)
148 nt.assert_in('\\aleph', matches)
149
149
150
150
151
151
152
152
153 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on python3')
153 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on python3')
154 def test_back_latex_completion():
154 def test_back_latex_completion():
155 ip = get_ipython()
155 ip = get_ipython()
156
156
157 # do not return more than 1 match for \beta, only the latex one.
157 # do not return more than 1 match for \beta, only the latex one.
158 name, matches = ip.complete('\\β')
158 name, matches = ip.complete('\\β')
159 nt.assert_equal(len(matches), 1)
159 nt.assert_equal(len(matches), 1)
160 nt.assert_equal(matches[0], '\\beta')
160 nt.assert_equal(matches[0], '\\beta')
161
161
162 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on python3')
162 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on python3')
163 def test_back_unicode_completion():
163 def test_back_unicode_completion():
164 ip = get_ipython()
164 ip = get_ipython()
165
165
166 name, matches = ip.complete('\\Ⅴ')
166 name, matches = ip.complete('\\Ⅴ')
167 nt.assert_equal(len(matches), 1)
167 nt.assert_equal(len(matches), 1)
168 nt.assert_equal(matches[0], '\\ROMAN NUMERAL FIVE')
168 nt.assert_equal(matches[0], '\\ROMAN NUMERAL FIVE')
169
169
170
170
171 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on python3')
171 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on python3')
172 def test_forward_unicode_completion():
172 def test_forward_unicode_completion():
173 ip = get_ipython()
173 ip = get_ipython()
174
174
175 name, matches = ip.complete('\\ROMAN NUMERAL FIVE')
175 name, matches = ip.complete('\\ROMAN NUMERAL FIVE')
176 nt.assert_equal(len(matches), 1)
176 nt.assert_equal(len(matches), 1)
177 nt.assert_equal(matches[0], 'Ⅴ')
177 nt.assert_equal(matches[0], 'Ⅴ')
178
178
179 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on python3')
179 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on python3')
180 def test_no_ascii_back_completion():
180 def test_no_ascii_back_completion():
181 ip = get_ipython()
181 ip = get_ipython()
182 with TemporaryWorkingDirectory(): # Avoid any filename completions
182 with TemporaryWorkingDirectory(): # Avoid any filename completions
183 # single ascii letters that don't have completions yet
183 # single ascii letters that don't have completions yet
184 for letter in 'jJ' :
184 for letter in 'jJ' :
185 name, matches = ip.complete('\\'+letter)
185 name, matches = ip.complete('\\'+letter)
186 nt.assert_equal(matches, [])
186 nt.assert_equal(matches, [])
187
187
188
188
189
189
190
190
191 class CompletionSplitterTestCase(unittest.TestCase):
191 class CompletionSplitterTestCase(unittest.TestCase):
192 def setUp(self):
192 def setUp(self):
193 self.sp = completer.CompletionSplitter()
193 self.sp = completer.CompletionSplitter()
194
194
195 def test_delim_setting(self):
195 def test_delim_setting(self):
196 self.sp.delims = ' '
196 self.sp.delims = ' '
197 nt.assert_equal(self.sp.delims, ' ')
197 nt.assert_equal(self.sp.delims, ' ')
198 nt.assert_equal(self.sp._delim_expr, '[\ ]')
198 nt.assert_equal(self.sp._delim_expr, '[\ ]')
199
199
200 def test_spaces(self):
200 def test_spaces(self):
201 """Test with only spaces as split chars."""
201 """Test with only spaces as split chars."""
202 self.sp.delims = ' '
202 self.sp.delims = ' '
203 t = [('foo', '', 'foo'),
203 t = [('foo', '', 'foo'),
204 ('run foo', '', 'foo'),
204 ('run foo', '', 'foo'),
205 ('run foo', 'bar', 'foo'),
205 ('run foo', 'bar', 'foo'),
206 ]
206 ]
207 check_line_split(self.sp, t)
207 check_line_split(self.sp, t)
208
208
209
209
210 def test_has_open_quotes1():
210 def test_has_open_quotes1():
211 for s in ["'", "'''", "'hi' '"]:
211 for s in ["'", "'''", "'hi' '"]:
212 nt.assert_equal(completer.has_open_quotes(s), "'")
212 nt.assert_equal(completer.has_open_quotes(s), "'")
213
213
214
214
215 def test_has_open_quotes2():
215 def test_has_open_quotes2():
216 for s in ['"', '"""', '"hi" "']:
216 for s in ['"', '"""', '"hi" "']:
217 nt.assert_equal(completer.has_open_quotes(s), '"')
217 nt.assert_equal(completer.has_open_quotes(s), '"')
218
218
219
219
220 def test_has_open_quotes3():
220 def test_has_open_quotes3():
221 for s in ["''", "''' '''", "'hi' 'ipython'"]:
221 for s in ["''", "''' '''", "'hi' 'ipython'"]:
222 nt.assert_false(completer.has_open_quotes(s))
222 nt.assert_false(completer.has_open_quotes(s))
223
223
224
224
225 def test_has_open_quotes4():
225 def test_has_open_quotes4():
226 for s in ['""', '""" """', '"hi" "ipython"']:
226 for s in ['""', '""" """', '"hi" "ipython"']:
227 nt.assert_false(completer.has_open_quotes(s))
227 nt.assert_false(completer.has_open_quotes(s))
228
228
229
229
230 @knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
230 @knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
231 def test_abspath_file_completions():
231 def test_abspath_file_completions():
232 ip = get_ipython()
232 ip = get_ipython()
233 with TemporaryDirectory() as tmpdir:
233 with TemporaryDirectory() as tmpdir:
234 prefix = os.path.join(tmpdir, 'foo')
234 prefix = os.path.join(tmpdir, 'foo')
235 suffixes = ['1', '2']
235 suffixes = ['1', '2']
236 names = [prefix+s for s in suffixes]
236 names = [prefix+s for s in suffixes]
237 for n in names:
237 for n in names:
238 open(n, 'w').close()
238 open(n, 'w').close()
239
239
240 # Check simple completion
240 # Check simple completion
241 c = ip.complete(prefix)[1]
241 c = ip.complete(prefix)[1]
242 nt.assert_equal(c, names)
242 nt.assert_equal(c, names)
243
243
244 # Now check with a function call
244 # Now check with a function call
245 cmd = 'a = f("%s' % prefix
245 cmd = 'a = f("%s' % prefix
246 c = ip.complete(prefix, cmd)[1]
246 c = ip.complete(prefix, cmd)[1]
247 comp = [prefix+s for s in suffixes]
247 comp = [prefix+s for s in suffixes]
248 nt.assert_equal(c, comp)
248 nt.assert_equal(c, comp)
249
249
250
250
251 def test_local_file_completions():
251 def test_local_file_completions():
252 ip = get_ipython()
252 ip = get_ipython()
253 with TemporaryWorkingDirectory():
253 with TemporaryWorkingDirectory():
254 prefix = './foo'
254 prefix = './foo'
255 suffixes = ['1', '2']
255 suffixes = ['1', '2']
256 names = [prefix+s for s in suffixes]
256 names = [prefix+s for s in suffixes]
257 for n in names:
257 for n in names:
258 open(n, 'w').close()
258 open(n, 'w').close()
259
259
260 # Check simple completion
260 # Check simple completion
261 c = ip.complete(prefix)[1]
261 c = ip.complete(prefix)[1]
262 nt.assert_equal(c, names)
262 nt.assert_equal(c, names)
263
263
264 # Now check with a function call
264 # Now check with a function call
265 cmd = 'a = f("%s' % prefix
265 cmd = 'a = f("%s' % prefix
266 c = ip.complete(prefix, cmd)[1]
266 c = ip.complete(prefix, cmd)[1]
267 comp = set(prefix+s for s in suffixes)
267 comp = set(prefix+s for s in suffixes)
268 nt.assert_true(comp.issubset(set(c)))
268 nt.assert_true(comp.issubset(set(c)))
269
269
270
270
271 def test_greedy_completions():
271 def test_greedy_completions():
272 ip = get_ipython()
272 ip = get_ipython()
273 ip.ex('a=list(range(5))')
273 ip.ex('a=list(range(5))')
274 _,c = ip.complete('.',line='a[0].')
274 _,c = ip.complete('.',line='a[0].')
275 nt.assert_false('.real' in c,
275 nt.assert_false('.real' in c,
276 "Shouldn't have completed on a[0]: %s"%c)
276 "Shouldn't have completed on a[0]: %s"%c)
277 with greedy_completion():
277 with greedy_completion():
278 def _(line, cursor_pos, expect, message):
278 def _(line, cursor_pos, expect, message):
279 _,c = ip.complete('.', line=line, cursor_pos=cursor_pos)
279 _,c = ip.complete('.', line=line, cursor_pos=cursor_pos)
280 nt.assert_in(expect, c, message%c)
280 nt.assert_in(expect, c, message%c)
281
281
282 yield _, 'a[0].', 5, '.real', "Should have completed on a[0].: %s"
282 yield _, 'a[0].', 5, '.real', "Should have completed on a[0].: %s"
283 yield _, 'a[0].r', 6, '.real', "Should have completed on a[0].r: %s"
283 yield _, 'a[0].r', 6, '.real', "Should have completed on a[0].r: %s"
284
284
285 if sys.version_info > (3,4):
285 if sys.version_info > (3,4):
286 yield _, 'a[0].from_', 10, '.from_bytes', "Should have completed on a[0].from_: %s"
286 yield _, 'a[0].from_', 10, '.from_bytes', "Should have completed on a[0].from_: %s"
287
287
288
288
289 def _2():
289 def _2():
290 # jedi bug, this will be empty, make it fail for now,
290 # jedi bug, this will be empty, make it fail for now,
291 # once jedi is fixed, switch to assert_in
291 # once jedi is fixed, switch to assert_in
292 # https://github.com/davidhalter/jedi/issues/718
292 # https://github.com/davidhalter/jedi/issues/718
293 _,c = ip.complete('.',line='a[0].from', cursor_pos=9)
293 _,c = ip.complete('.',line='a[0].from', cursor_pos=9)
294 nt.assert_not_in('.from_bytes', c, "Should not have completed on a[0].from (jedi bug), if fails, update test to assert_in: %s"%c)
294 nt.assert_not_in('.from_bytes', c, "Should not have completed on a[0].from (jedi bug), if fails, update test to assert_in: %s"%c)
295 yield _2
295 yield _2
296
296
297
297
298
298
299 def test_omit__names():
299 def test_omit__names():
300 # also happens to test IPCompleter as a configurable
300 # also happens to test IPCompleter as a configurable
301 ip = get_ipython()
301 ip = get_ipython()
302 ip._hidden_attr = 1
302 ip._hidden_attr = 1
303 ip._x = {}
303 ip._x = {}
304 c = ip.Completer
304 c = ip.Completer
305 ip.ex('ip=get_ipython()')
305 ip.ex('ip=get_ipython()')
306 cfg = Config()
306 cfg = Config()
307 cfg.IPCompleter.omit__names = 0
307 cfg.IPCompleter.omit__names = 0
308 c.update_config(cfg)
308 c.update_config(cfg)
309 s,matches = c.complete('ip.')
309 s,matches = c.complete('ip.')
310 nt.assert_in('ip.__str__', matches)
310 nt.assert_in('ip.__str__', matches)
311 nt.assert_in('ip._hidden_attr', matches)
311 nt.assert_in('ip._hidden_attr', matches)
312 cfg = Config()
312 cfg = Config()
313 cfg.IPCompleter.omit__names = 1
313 cfg.IPCompleter.omit__names = 1
314 c.update_config(cfg)
314 c.update_config(cfg)
315 s,matches = c.complete('ip.')
315 s,matches = c.complete('ip.')
316 nt.assert_not_in('ip.__str__', matches)
316 nt.assert_not_in('ip.__str__', matches)
317 nt.assert_in('ip._hidden_attr', matches)
317 nt.assert_in('ip._hidden_attr', matches)
318 cfg = Config()
318 cfg = Config()
319 cfg.IPCompleter.omit__names = 2
319 cfg.IPCompleter.omit__names = 2
320 c.update_config(cfg)
320 c.update_config(cfg)
321 s,matches = c.complete('ip.')
321 s,matches = c.complete('ip.')
322 nt.assert_not_in('ip.__str__', matches)
322 nt.assert_not_in('ip.__str__', matches)
323 nt.assert_not_in('ip._hidden_attr', matches)
323 nt.assert_not_in('ip._hidden_attr', matches)
324 s,matches = c.complete('ip._x.')
324 s,matches = c.complete('ip._x.')
325 nt.assert_in('ip._x.keys', matches)
325 nt.assert_in('ip._x.keys', matches)
326 del ip._hidden_attr
326 del ip._hidden_attr
327
327
328
328
329 def test_limit_to__all__False_ok():
329 def test_limit_to__all__False_ok():
330 ip = get_ipython()
330 ip = get_ipython()
331 c = ip.Completer
331 c = ip.Completer
332 ip.ex('class D: x=24')
332 ip.ex('class D: x=24')
333 ip.ex('d=D()')
333 ip.ex('d=D()')
334 cfg = Config()
334 cfg = Config()
335 cfg.IPCompleter.limit_to__all__ = False
335 cfg.IPCompleter.limit_to__all__ = False
336 c.update_config(cfg)
336 c.update_config(cfg)
337 s, matches = c.complete('d.')
337 s, matches = c.complete('d.')
338 nt.assert_in('d.x', matches)
338 nt.assert_in('d.x', matches)
339
339
340
340
341 def test_get__all__entries_ok():
341 def test_get__all__entries_ok():
342 class A(object):
342 class A(object):
343 __all__ = ['x', 1]
343 __all__ = ['x', 1]
344 words = completer.get__all__entries(A())
344 words = completer.get__all__entries(A())
345 nt.assert_equal(words, ['x'])
345 nt.assert_equal(words, ['x'])
346
346
347
347
348 def test_get__all__entries_no__all__ok():
348 def test_get__all__entries_no__all__ok():
349 class A(object):
349 class A(object):
350 pass
350 pass
351 words = completer.get__all__entries(A())
351 words = completer.get__all__entries(A())
352 nt.assert_equal(words, [])
352 nt.assert_equal(words, [])
353
353
354
354
355 def test_func_kw_completions():
355 def test_func_kw_completions():
356 ip = get_ipython()
356 ip = get_ipython()
357 c = ip.Completer
357 c = ip.Completer
358 ip.ex('def myfunc(a=1,b=2): return a+b')
358 ip.ex('def myfunc(a=1,b=2): return a+b')
359 s, matches = c.complete(None, 'myfunc(1,b')
359 s, matches = c.complete(None, 'myfunc(1,b')
360 nt.assert_in('b=', matches)
360 nt.assert_in('b=', matches)
361 # Simulate completing with cursor right after b (pos==10):
361 # Simulate completing with cursor right after b (pos==10):
362 s, matches = c.complete(None, 'myfunc(1,b)', 10)
362 s, matches = c.complete(None, 'myfunc(1,b)', 10)
363 nt.assert_in('b=', matches)
363 nt.assert_in('b=', matches)
364 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
364 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
365 nt.assert_in('b=', matches)
365 nt.assert_in('b=', matches)
366 #builtin function
366 #builtin function
367 s, matches = c.complete(None, 'min(k, k')
367 s, matches = c.complete(None, 'min(k, k')
368 nt.assert_in('key=', matches)
368 nt.assert_in('key=', matches)
369
369
370
370
371 def test_default_arguments_from_docstring():
371 def test_default_arguments_from_docstring():
372 ip = get_ipython()
372 ip = get_ipython()
373 c = ip.Completer
373 c = ip.Completer
374 kwd = c._default_arguments_from_docstring(
374 kwd = c._default_arguments_from_docstring(
375 'min(iterable[, key=func]) -> value')
375 'min(iterable[, key=func]) -> value')
376 nt.assert_equal(kwd, ['key'])
376 nt.assert_equal(kwd, ['key'])
377 #with cython type etc
377 #with cython type etc
378 kwd = c._default_arguments_from_docstring(
378 kwd = c._default_arguments_from_docstring(
379 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
379 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
380 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
380 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
381 #white spaces
381 #white spaces
382 kwd = c._default_arguments_from_docstring(
382 kwd = c._default_arguments_from_docstring(
383 '\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
383 '\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
384 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
384 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
385
385
386 def test_line_magics():
386 def test_line_magics():
387 ip = get_ipython()
387 ip = get_ipython()
388 c = ip.Completer
388 c = ip.Completer
389 s, matches = c.complete(None, 'lsmag')
389 s, matches = c.complete(None, 'lsmag')
390 nt.assert_in('%lsmagic', matches)
390 nt.assert_in('%lsmagic', matches)
391 s, matches = c.complete(None, '%lsmag')
391 s, matches = c.complete(None, '%lsmag')
392 nt.assert_in('%lsmagic', matches)
392 nt.assert_in('%lsmagic', matches)
393
393
394
394
395 def test_cell_magics():
395 def test_cell_magics():
396 from IPython.core.magic import register_cell_magic
396 from IPython.core.magic import register_cell_magic
397
397
398 @register_cell_magic
398 @register_cell_magic
399 def _foo_cellm(line, cell):
399 def _foo_cellm(line, cell):
400 pass
400 pass
401
401
402 ip = get_ipython()
402 ip = get_ipython()
403 c = ip.Completer
403 c = ip.Completer
404
404
405 s, matches = c.complete(None, '_foo_ce')
405 s, matches = c.complete(None, '_foo_ce')
406 nt.assert_in('%%_foo_cellm', matches)
406 nt.assert_in('%%_foo_cellm', matches)
407 s, matches = c.complete(None, '%%_foo_ce')
407 s, matches = c.complete(None, '%%_foo_ce')
408 nt.assert_in('%%_foo_cellm', matches)
408 nt.assert_in('%%_foo_cellm', matches)
409
409
410
410
411 def test_line_cell_magics():
411 def test_line_cell_magics():
412 from IPython.core.magic import register_line_cell_magic
412 from IPython.core.magic import register_line_cell_magic
413
413
414 @register_line_cell_magic
414 @register_line_cell_magic
415 def _bar_cellm(line, cell):
415 def _bar_cellm(line, cell):
416 pass
416 pass
417
417
418 ip = get_ipython()
418 ip = get_ipython()
419 c = ip.Completer
419 c = ip.Completer
420
420
421 # The policy here is trickier, see comments in completion code. The
421 # The policy here is trickier, see comments in completion code. The
422 # returned values depend on whether the user passes %% or not explicitly,
422 # returned values depend on whether the user passes %% or not explicitly,
423 # and this will show a difference if the same name is both a line and cell
423 # and this will show a difference if the same name is both a line and cell
424 # magic.
424 # magic.
425 s, matches = c.complete(None, '_bar_ce')
425 s, matches = c.complete(None, '_bar_ce')
426 nt.assert_in('%_bar_cellm', matches)
426 nt.assert_in('%_bar_cellm', matches)
427 nt.assert_in('%%_bar_cellm', matches)
427 nt.assert_in('%%_bar_cellm', matches)
428 s, matches = c.complete(None, '%_bar_ce')
428 s, matches = c.complete(None, '%_bar_ce')
429 nt.assert_in('%_bar_cellm', matches)
429 nt.assert_in('%_bar_cellm', matches)
430 nt.assert_in('%%_bar_cellm', matches)
430 nt.assert_in('%%_bar_cellm', matches)
431 s, matches = c.complete(None, '%%_bar_ce')
431 s, matches = c.complete(None, '%%_bar_ce')
432 nt.assert_not_in('%_bar_cellm', matches)
432 nt.assert_not_in('%_bar_cellm', matches)
433 nt.assert_in('%%_bar_cellm', matches)
433 nt.assert_in('%%_bar_cellm', matches)
434
434
435
435
def test_magic_completion_order():

    ip = get_ipython()
    c = ip.Completer

    # Test ordering of magics and non-magics with the same name.
    # We want the non-magic first.

    # Before the name exists in the user namespace, there should only be one
    # option, the magic:
    text, matches = c.complete('mat')
    nt.assert_equal(matches, ["%matplotlib"])

    ip.run_cell("matplotlib = 1")  # introduce name into namespace

    # Now there should be two options, ordered like this:
    text, matches = c.complete('mat')
    nt.assert_equal(matches, ["matplotlib", "%matplotlib"])

    ip.run_cell("timeit = 1")  # define a user variable called 'timeit'

    # Order of user variable and line and cell magics with same name:
    text, matches = c.complete('timeit')
    nt.assert_equal(matches, ["timeit", "%timeit", "%%timeit"])


def test_dict_key_completion_string():
    """Test dictionary key completion for string keys"""
    ip = get_ipython()
    complete = ip.Completer.complete

    ip.user_ns['d'] = {'abc': None}

    # check completion at different stages
    _, matches = complete(line_buffer="d[")
    nt.assert_in("'abc'", matches)
    nt.assert_not_in("'abc']", matches)

    _, matches = complete(line_buffer="d['")
    nt.assert_in("abc", matches)
    nt.assert_not_in("abc']", matches)

    _, matches = complete(line_buffer="d['a")
    nt.assert_in("abc", matches)
    nt.assert_not_in("abc']", matches)

    # check use of different quoting
    _, matches = complete(line_buffer="d[\"")
    nt.assert_in("abc", matches)
    nt.assert_not_in('abc\"]', matches)

    _, matches = complete(line_buffer="d[\"a")
    nt.assert_in("abc", matches)
    nt.assert_not_in('abc\"]', matches)

    # check sensitivity to following context
    _, matches = complete(line_buffer="d[]", cursor_pos=2)
    nt.assert_in("'abc'", matches)

    _, matches = complete(line_buffer="d['']", cursor_pos=3)
    nt.assert_in("abc", matches)
    nt.assert_not_in("abc'", matches)
    nt.assert_not_in("abc']", matches)

    # check that multiple solutions are correctly returned and that noise is
    # not included
    ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,
                       5: None}

    _, matches = complete(line_buffer="d['a")
    nt.assert_in("abc", matches)
    nt.assert_in("abd", matches)
    nt.assert_not_in("bad", matches)
    assert not any(m.endswith((']', '"', "'")) for m in matches), matches

    # check escaping and whitespace
    ip.user_ns['d'] = {'a\nb': None, 'a\'b': None, 'a"b': None, 'a word': None}
    _, matches = complete(line_buffer="d['a")
    nt.assert_in("a\\nb", matches)
    nt.assert_in("a\\'b", matches)
    nt.assert_in("a\"b", matches)
    nt.assert_in("a word", matches)
    assert not any(m.endswith((']', '"', "'")) for m in matches), matches

    # - can complete on non-initial word of the string
    _, matches = complete(line_buffer="d['a w")
    nt.assert_in("word", matches)

    # - understands quote escaping
    _, matches = complete(line_buffer="d['a\\'")
    nt.assert_in("b", matches)

    # - default quoting should work like repr
    _, matches = complete(line_buffer="d[")
    nt.assert_in("\"a'b\"", matches)

    # - when the opening quote is ", it is possible to match an unescaped apostrophe
    _, matches = complete(line_buffer="d[\"a'")
    nt.assert_in("b", matches)

    # must not split on delimiters that readline itself won't split on
    if '-' not in ip.Completer.splitter.delims:
        ip.user_ns['d'] = {'before-after': None}
        _, matches = complete(line_buffer="d['before-af")
        nt.assert_in('before-after', matches)

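# Added note (not part of the original file): these dict-key tests all drive
# IPCompleter.complete(text=None, line_buffer=None, cursor_pos=None); when
# cursor_pos is omitted it appears to default to the end of line_buffer, so
# the explicit cursor_pos arguments in the "d[]" and "d['']" cases above are
# what simulate completing in the middle of a line.
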
def test_dict_key_completion_contexts():
    """Test expression contexts in which dict key completion occurs"""
    ip = get_ipython()
    complete = ip.Completer.complete
    d = {'abc': None}
    ip.user_ns['d'] = d

    class C:
        data = d
    ip.user_ns['C'] = C
    ip.user_ns['get'] = lambda: d

    def assert_no_completion(**kwargs):
        _, matches = complete(**kwargs)
        nt.assert_not_in('abc', matches)
        nt.assert_not_in('abc\'', matches)
        nt.assert_not_in('abc\']', matches)
        nt.assert_not_in('\'abc\'', matches)
        nt.assert_not_in('\'abc\']', matches)

    def assert_completion(**kwargs):
        _, matches = complete(**kwargs)
        nt.assert_in("'abc'", matches)
        nt.assert_not_in("'abc']", matches)

    # no completion after string closed, even if reopened
    assert_no_completion(line_buffer="d['a'")
    assert_no_completion(line_buffer="d[\"a\"")
    assert_no_completion(line_buffer="d['a' + ")
    assert_no_completion(line_buffer="d['a' + '")

    # completion in non-trivial expressions
    assert_completion(line_buffer="+ d[")
    assert_completion(line_buffer="(d[")
    assert_completion(line_buffer="C.data[")

    # greedy flag
    def assert_completion(**kwargs):
        _, matches = complete(**kwargs)
        nt.assert_in("get()['abc']", matches)

    assert_no_completion(line_buffer="get()[")
    with greedy_completion():
        assert_completion(line_buffer="get()[")
        assert_completion(line_buffer="get()['")
        assert_completion(line_buffer="get()['a")
        assert_completion(line_buffer="get()['ab")
        assert_completion(line_buffer="get()['abc")


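# Added note (an observation from these tests, not from the completer source):
# with the greedy option enabled the completer may evaluate the expression to
# the left of the bracket -- here the call get() -- which is why "get()[" only
# produces key completions inside the greedy_completion() context manager.

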
@dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
def test_dict_key_completion_bytes():
    """Test handling of bytes in dict key completion"""
    ip = get_ipython()
    complete = ip.Completer.complete

    ip.user_ns['d'] = {'abc': None, b'abd': None}

    _, matches = complete(line_buffer="d[")
    nt.assert_in("'abc'", matches)
    nt.assert_in("b'abd'", matches)

    if False:  # not currently implemented
        _, matches = complete(line_buffer="d[b")
        nt.assert_in("b'abd'", matches)
        nt.assert_not_in("b'abc'", matches)

        _, matches = complete(line_buffer="d[b'")
        nt.assert_in("abd", matches)
        nt.assert_not_in("abc", matches)

        _, matches = complete(line_buffer="d[B'")
        nt.assert_in("abd", matches)
        nt.assert_not_in("abc", matches)

        _, matches = complete(line_buffer="d['")
        nt.assert_in("abc", matches)
        nt.assert_not_in("abd", matches)


@dec.onlyif(sys.version_info[0] < 3, 'This test only applies in Py<3')
def test_dict_key_completion_unicode_py2():
    """Test handling of unicode in dict key completion"""
    ip = get_ipython()
    complete = ip.Completer.complete

    ip.user_ns['d'] = {u'abc': None,
                       u'a\u05d0b': None}

    _, matches = complete(line_buffer="d[")
    nt.assert_in("u'abc'", matches)
    nt.assert_in("u'a\\u05d0b'", matches)

    _, matches = complete(line_buffer="d['a")
    nt.assert_in("abc", matches)
    nt.assert_not_in("a\\u05d0b", matches)

    _, matches = complete(line_buffer="d[u'a")
    nt.assert_in("abc", matches)
    nt.assert_in("a\\u05d0b", matches)

    _, matches = complete(line_buffer="d[U'a")
    nt.assert_in("abc", matches)
    nt.assert_in("a\\u05d0b", matches)

    # query using escape
    _, matches = complete(line_buffer=u"d[u'a\\u05d0")
    nt.assert_in("u05d0b", matches)  # tokenized after \\

    # query using character
    _, matches = complete(line_buffer=u"d[u'a\u05d0")
    nt.assert_in(u"a\u05d0b", matches)

    with greedy_completion():
        _, matches = complete(line_buffer="d[")
        nt.assert_in("d[u'abc']", matches)
        nt.assert_in("d[u'a\\u05d0b']", matches)

        _, matches = complete(line_buffer="d['a")
        nt.assert_in("d['abc']", matches)
        nt.assert_not_in("d[u'a\\u05d0b']", matches)

        _, matches = complete(line_buffer="d[u'a")
        nt.assert_in("d[u'abc']", matches)
        nt.assert_in("d[u'a\\u05d0b']", matches)

        _, matches = complete(line_buffer="d[U'a")
        nt.assert_in("d[U'abc']", matches)
        nt.assert_in("d[U'a\\u05d0b']", matches)

        # query using escape
        _, matches = complete(line_buffer=u"d[u'a\\u05d0")
        nt.assert_in("d[u'a\\u05d0b']", matches)  # tokenized after \\

        # query using character
        _, matches = complete(line_buffer=u"d[u'a\u05d0")
        nt.assert_in(u"d[u'a\u05d0b']", matches)


@dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
def test_dict_key_completion_unicode_py3():
    """Test handling of unicode in dict key completion"""
    ip = get_ipython()
    complete = ip.Completer.complete

    ip.user_ns['d'] = {u'a\u05d0': None}

    # query using escape
    _, matches = complete(line_buffer="d['a\\u05d0")
    nt.assert_in("u05d0", matches)  # tokenized after \\

    # query using character
    _, matches = complete(line_buffer="d['a\u05d0")
    nt.assert_in(u"a\u05d0", matches)

    with greedy_completion():
        # query using escape
        _, matches = complete(line_buffer="d['a\\u05d0")
        nt.assert_in("d['a\\u05d0']", matches)  # tokenized after \\

        # query using character
        _, matches = complete(line_buffer="d['a\u05d0")
        nt.assert_in(u"d['a\u05d0']", matches)


@dec.skip_without('numpy')
def test_struct_array_key_completion():
    """Test dict key completion applies to numpy struct arrays"""
    import numpy
    ip = get_ipython()
    complete = ip.Completer.complete
    ip.user_ns['d'] = numpy.array([], dtype=[('hello', 'f'), ('world', 'f')])
    _, matches = complete(line_buffer="d['")
    nt.assert_in("hello", matches)
    nt.assert_in("world", matches)
    # complete on the numpy struct itself
    dt = numpy.dtype([('my_head', [('my_dt', '>u4'), ('my_df', '>u4')]),
                      ('my_data', '>f4', 5)])
    x = numpy.zeros(2, dtype=dt)
    ip.user_ns['d'] = x[1]
    _, matches = complete(line_buffer="d['")
    nt.assert_in("my_head", matches)
    nt.assert_in("my_data", matches)
    # complete on a nested level
    with greedy_completion():
        ip.user_ns['d'] = numpy.zeros(2, dtype=dt)
        _, matches = complete(line_buffer="d[1]['my_head']['")
        nt.assert_true(any(["my_dt" in m for m in matches]))
        nt.assert_true(any(["my_df" in m for m in matches]))


@dec.skip_without('pandas')
def test_dataframe_key_completion():
    """Test dict key completion applies to pandas DataFrames"""
    import pandas
    ip = get_ipython()
    complete = ip.Completer.complete
    ip.user_ns['d'] = pandas.DataFrame({'hello': [1], 'world': [2]})
    _, matches = complete(line_buffer="d['")
    nt.assert_in("hello", matches)
    nt.assert_in("world", matches)


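# Added note: taken together, the two tests above show that key completion is
# not limited to plain dicts -- numpy structured arrays and pandas DataFrames
# are completed as well, presumably via their field/column names (an inference
# from these tests, not from the completer implementation).

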
def test_dict_key_completion_invalids():
    """Smoke test cases dict key completion can't handle"""
    ip = get_ipython()
    complete = ip.Completer.complete

    ip.user_ns['no_getitem'] = None
    ip.user_ns['no_keys'] = []
    ip.user_ns['cant_call_keys'] = dict
    ip.user_ns['empty'] = {}
    ip.user_ns['d'] = {'abc': 5}

    _, matches = complete(line_buffer="no_getitem['")
    _, matches = complete(line_buffer="no_keys['")
    _, matches = complete(line_buffer="cant_call_keys['")
    _, matches = complete(line_buffer="empty['")
    _, matches = complete(line_buffer="name_error['")
    _, matches = complete(line_buffer="d['\\")  # incomplete escape

class KeyCompletable(object):
    def __init__(self, things=()):
        self.things = things

    def _ipython_key_completions_(self):
        return list(self.things)

def test_object_key_completion():
    ip = get_ipython()
    ip.user_ns['key_completable'] = KeyCompletable(['qwerty', 'qwick'])

    _, matches = ip.Completer.complete(line_buffer="key_completable['qw")
    nt.assert_in('qwerty', matches)
    nt.assert_in('qwick', matches)


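# Illustrative sketch (added for clarity, not part of the test suite): any
# object can opt in to key completion by defining _ipython_key_completions_,
# exactly as KeyCompletable does above. A dict-backed container would
# typically just return its keys.
class _ExampleRecord(object):
    def __init__(self, **fields):
        self._fields = dict(fields)

    def __getitem__(self, key):
        return self._fields[key]

    def _ipython_key_completions_(self):
        # Candidates offered when the user types: record['<TAB>
        return list(self._fields)

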
def test_aimport_module_completer():
    ip = get_ipython()
    _, matches = ip.complete('i', '%aimport i')
    nt.assert_in('io', matches)
    nt.assert_not_in('int', matches)

def test_nested_import_module_completer():
    ip = get_ipython()
    _, matches = ip.complete(None, 'import IPython.co', 17)
    nt.assert_in('IPython.core', matches)
    nt.assert_not_in('import IPython.core', matches)
    nt.assert_not_in('IPython.display', matches)

def test_import_module_completer():
    ip = get_ipython()
    _, matches = ip.complete('i', 'import i')
    nt.assert_in('io', matches)
    nt.assert_not_in('int', matches)

def test_from_module_completer():
    ip = get_ipython()
    _, matches = ip.complete('B', 'from io import B', 16)
    nt.assert_in('BytesIO', matches)
    nt.assert_not_in('BaseException', matches)
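# Added note: 16 == len('from io import B'), i.e. the completion request is
# anchored at the end of the line, matching the explicit cursor position (17)
# passed for 'import IPython.co' in test_nested_import_module_completer above.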