Allow completion on the numpy struct itself
mbyt
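As context for reviewers, a minimal sketch of what this change enables, assuming NumPy is installed (the array and field names below are purely illustrative): key completion not just on a structured ndarray, but also on a single element drawn from it, which is a numpy.void scalar.

# Illustrative only -- not part of the diff below.
import numpy as np

arr = np.zeros(3, dtype=[('x', float), ('y', float)])  # structured array
row = arr[0]                                            # a numpy.void scalar
print(row.dtype.names)                                  # ('x', 'y')
# With the extra _safe_isinstance(obj, 'numpy', 'void') check added below,
# typing row['<TAB> in IPython now offers 'x' and 'y', as arr['<TAB> already did.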
@@ -1,1171 +1,1172 @@
1 # encoding: utf-8
1 # encoding: utf-8
2 """Word completion for IPython.
2 """Word completion for IPython.
3
3
4 This module is a fork of the rlcompleter module in the Python standard
4 This module is a fork of the rlcompleter module in the Python standard
5 library. The original enhancements made to rlcompleter have been sent
5 library. The original enhancements made to rlcompleter have been sent
6 upstream and were accepted as of Python 2.3, but we need a lot more
6 upstream and were accepted as of Python 2.3, but we need a lot more
7 functionality specific to IPython, so this module will continue to live as an
7 functionality specific to IPython, so this module will continue to live as an
8 IPython-specific utility.
8 IPython-specific utility.
9
9
10 Original rlcompleter documentation:
10 Original rlcompleter documentation:
11
11
12 This requires the latest extension to the readline module. The completer
12 This requires the latest extension to the readline module. The completer
13 completes keywords, built-ins and globals in __main__; when completing
13 completes keywords, built-ins and globals in __main__; when completing
14 NAME.NAME..., it evaluates (!) the expression up to the last dot and
14 NAME.NAME..., it evaluates (!) the expression up to the last dot and
15 completes its attributes.
15 completes its attributes.
16
16
17 It's very cool to do "import string" type "string.", hit the
17 It's very cool to do "import string" type "string.", hit the
18 completion key (twice), and see the list of names defined by the
18 completion key (twice), and see the list of names defined by the
19 string module!
19 string module!
20
20
21 Tip: to use the tab key as the completion key, call
21 Tip: to use the tab key as the completion key, call
22
22
23 readline.parse_and_bind("tab: complete")
23 readline.parse_and_bind("tab: complete")
24
24
25 Notes:
25 Notes:
26
26
27 - Exceptions raised by the completer function are *ignored* (and
27 - Exceptions raised by the completer function are *ignored* (and
28 generally cause the completion to fail). This is a feature -- since
28 generally cause the completion to fail). This is a feature -- since
29 readline sets the tty device in raw (or cbreak) mode, printing a
29 readline sets the tty device in raw (or cbreak) mode, printing a
30 traceback wouldn't work well without some complicated hoopla to save,
30 traceback wouldn't work well without some complicated hoopla to save,
31 reset and restore the tty state.
31 reset and restore the tty state.
32
32
33 - The evaluation of the NAME.NAME... form may cause arbitrary
33 - The evaluation of the NAME.NAME... form may cause arbitrary
34 application defined code to be executed if an object with a
34 application defined code to be executed if an object with a
35 ``__getattr__`` hook is found. Since it is the responsibility of the
35 ``__getattr__`` hook is found. Since it is the responsibility of the
36 application (or the user) to enable this feature, I consider this an
36 application (or the user) to enable this feature, I consider this an
37 acceptable risk. More complicated expressions (e.g. function calls or
37 acceptable risk. More complicated expressions (e.g. function calls or
38 indexing operations) are *not* evaluated.
38 indexing operations) are *not* evaluated.
39
39
40 - GNU readline is also used by the built-in functions input() and
40 - GNU readline is also used by the built-in functions input() and
41 raw_input(), and thus these also benefit/suffer from the completer
41 raw_input(), and thus these also benefit/suffer from the completer
42 features. Clearly an interactive application can benefit by
42 features. Clearly an interactive application can benefit by
43 specifying its own completer function and using raw_input() for all
43 specifying its own completer function and using raw_input() for all
44 its input.
44 its input.
45
45
46 - When the original stdin is not a tty device, GNU readline is never
46 - When the original stdin is not a tty device, GNU readline is never
47 used, and this module (and the readline module) are silently inactive.
47 used, and this module (and the readline module) are silently inactive.
48 """
48 """
49
49
50 # Copyright (c) IPython Development Team.
50 # Copyright (c) IPython Development Team.
51 # Distributed under the terms of the Modified BSD License.
51 # Distributed under the terms of the Modified BSD License.
52 #
52 #
53 # Some of this code originated from rlcompleter in the Python standard library
53 # Some of this code originated from rlcompleter in the Python standard library
54 # Copyright (C) 2001 Python Software Foundation, www.python.org
54 # Copyright (C) 2001 Python Software Foundation, www.python.org
55
55
56 import __main__
56 import __main__
57 import glob
57 import glob
58 import inspect
58 import inspect
59 import itertools
59 import itertools
60 import keyword
60 import keyword
61 import os
61 import os
62 import re
62 import re
63 import sys
63 import sys
64
64
65 from IPython.config.configurable import Configurable
65 from IPython.config.configurable import Configurable
66 from IPython.core.error import TryNext
66 from IPython.core.error import TryNext
67 from IPython.core.inputsplitter import ESC_MAGIC
67 from IPython.core.inputsplitter import ESC_MAGIC
68 from IPython.core.latex_symbols import latex_symbols
68 from IPython.core.latex_symbols import latex_symbols
69 from IPython.utils import generics
69 from IPython.utils import generics
70 from IPython.utils import io
70 from IPython.utils import io
71 from IPython.utils.decorators import undoc
71 from IPython.utils.decorators import undoc
72 from IPython.utils.dir2 import dir2
72 from IPython.utils.dir2 import dir2
73 from IPython.utils.process import arg_split
73 from IPython.utils.process import arg_split
74 from IPython.utils.py3compat import builtin_mod, string_types, PY3
74 from IPython.utils.py3compat import builtin_mod, string_types, PY3
75 from IPython.utils.traitlets import CBool, Enum
75 from IPython.utils.traitlets import CBool, Enum
76
76
77 #-----------------------------------------------------------------------------
77 #-----------------------------------------------------------------------------
78 # Globals
78 # Globals
79 #-----------------------------------------------------------------------------
79 #-----------------------------------------------------------------------------
80
80
81 # Public API
81 # Public API
82 __all__ = ['Completer','IPCompleter']
82 __all__ = ['Completer','IPCompleter']
83
83
84 if sys.platform == 'win32':
84 if sys.platform == 'win32':
85 PROTECTABLES = ' '
85 PROTECTABLES = ' '
86 else:
86 else:
87 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
87 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
88
88
89
89
90 #-----------------------------------------------------------------------------
90 #-----------------------------------------------------------------------------
91 # Main functions and classes
91 # Main functions and classes
92 #-----------------------------------------------------------------------------
92 #-----------------------------------------------------------------------------
93
93
94 def has_open_quotes(s):
94 def has_open_quotes(s):
95 """Return whether a string has open quotes.
95 """Return whether a string has open quotes.
96
96
97 This simply counts whether the number of quote characters of either type in
97 This simply counts whether the number of quote characters of either type in
98 the string is odd.
98 the string is odd.
99
99
100 Returns
100 Returns
101 -------
101 -------
102 If there is an open quote, the quote character is returned. Else, return
102 If there is an open quote, the quote character is returned. Else, return
103 False.
103 False.
104 """
104 """
105 # We check " first, then ', so complex cases with nested quotes will get
105 # We check " first, then ', so complex cases with nested quotes will get
106 # the " to take precedence.
106 # the " to take precedence.
107 if s.count('"') % 2:
107 if s.count('"') % 2:
108 return '"'
108 return '"'
109 elif s.count("'") % 2:
109 elif s.count("'") % 2:
110 return "'"
110 return "'"
111 else:
111 else:
112 return False
112 return False
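# Editorial usage sketch for has_open_quotes, not part of the diff; it assumes
# this module is importable as IPython.core.completer (its home in IPython).
from IPython.core.completer import has_open_quotes

assert has_open_quotes("print('abc") == "'"       # one unmatched single quote
assert has_open_quotes('say("hi') == '"'          # one unmatched double quote
assert has_open_quotes("a = 'x' + 'y'") is False  # all quotes balanced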
113
113
114
114
115 def protect_filename(s):
115 def protect_filename(s):
116 """Escape a string to protect certain characters."""
116 """Escape a string to protect certain characters."""
117
117
118 return "".join([(ch in PROTECTABLES and '\\' + ch or ch)
118 return "".join([(ch in PROTECTABLES and '\\' + ch or ch)
119 for ch in s])
119 for ch in s])
120
120
121 def expand_user(path):
121 def expand_user(path):
122 """Expand '~'-style usernames in strings.
122 """Expand '~'-style usernames in strings.
123
123
124 This is similar to :func:`os.path.expanduser`, but it computes and returns
124 This is similar to :func:`os.path.expanduser`, but it computes and returns
125 extra information that will be useful if the input was being used in
125 extra information that will be useful if the input was being used in
126 computing completions, and you wish to return the completions with the
126 computing completions, and you wish to return the completions with the
127 original '~' instead of its expanded value.
127 original '~' instead of its expanded value.
128
128
129 Parameters
129 Parameters
130 ----------
130 ----------
131 path : str
131 path : str
132 String to be expanded. If no ~ is present, the output is the same as the
132 String to be expanded. If no ~ is present, the output is the same as the
133 input.
133 input.
134
134
135 Returns
135 Returns
136 -------
136 -------
137 newpath : str
137 newpath : str
138 Result of ~ expansion in the input path.
138 Result of ~ expansion in the input path.
139 tilde_expand : bool
139 tilde_expand : bool
140 Whether any expansion was performed or not.
140 Whether any expansion was performed or not.
141 tilde_val : str
141 tilde_val : str
142 The value that ~ was replaced with.
142 The value that ~ was replaced with.
143 """
143 """
144 # Default values
144 # Default values
145 tilde_expand = False
145 tilde_expand = False
146 tilde_val = ''
146 tilde_val = ''
147 newpath = path
147 newpath = path
148
148
149 if path.startswith('~'):
149 if path.startswith('~'):
150 tilde_expand = True
150 tilde_expand = True
151 rest = len(path)-1
151 rest = len(path)-1
152 newpath = os.path.expanduser(path)
152 newpath = os.path.expanduser(path)
153 if rest:
153 if rest:
154 tilde_val = newpath[:-rest]
154 tilde_val = newpath[:-rest]
155 else:
155 else:
156 tilde_val = newpath
156 tilde_val = newpath
157
157
158 return newpath, tilde_expand, tilde_val
158 return newpath, tilde_expand, tilde_val
159
159
160
160
161 def compress_user(path, tilde_expand, tilde_val):
161 def compress_user(path, tilde_expand, tilde_val):
162 """Does the opposite of expand_user, with its outputs.
162 """Does the opposite of expand_user, with its outputs.
163 """
163 """
164 if tilde_expand:
164 if tilde_expand:
165 return path.replace(tilde_val, '~')
165 return path.replace(tilde_val, '~')
166 else:
166 else:
167 return path
167 return path
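# Editorial usage sketch for expand_user/compress_user, not part of the diff;
# assumes the module is importable as IPython.core.completer and that $HOME
# expands to something like /home/alice (the concrete path is illustrative).
from IPython.core.completer import compress_user, expand_user

newpath, tilde_expand, tilde_val = expand_user('~/notebooks')
# e.g. newpath == '/home/alice/notebooks', tilde_expand is True and
# tilde_val == '/home/alice'; compress_user undoes the expansion:
assert compress_user(newpath, tilde_expand, tilde_val) == '~/notebooks'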
168
168
169
169
170
170
171 def penalize_magics_key(word):
171 def penalize_magics_key(word):
172 """key for sorting that penalizes magic commands in the ordering
172 """key for sorting that penalizes magic commands in the ordering
173
173
174 Normal words are left alone.
174 Normal words are left alone.
175
175
176 Magic commands have the initial % moved to the end, e.g.
176 Magic commands have the initial % moved to the end, e.g.
177 %matplotlib is transformed as follows:
177 %matplotlib is transformed as follows:
178
178
179 %matplotlib -> matplotlib%
179 %matplotlib -> matplotlib%
180
180
181 [The choice of the final % is arbitrary.]
181 [The choice of the final % is arbitrary.]
182
182
183 Since "matplotlib" < "matplotlib%" as strings,
183 Since "matplotlib" < "matplotlib%" as strings,
184 "timeit" will appear before the magic "%timeit" in the ordering
184 "timeit" will appear before the magic "%timeit" in the ordering
185
185
186 For consistency, move "%%" to the end, so cell magics appear *after*
186 For consistency, move "%%" to the end, so cell magics appear *after*
187 line magics with the same name.
187 line magics with the same name.
188
188
189 A check is performed that there are no other "%" in the string;
189 A check is performed that there are no other "%" in the string;
190 if there are, then the string is not a magic command and is left unchanged.
190 if there are, then the string is not a magic command and is left unchanged.
191
191
192 """
192 """
193
193
194 # Move any % signs from start to end of the key
194 # Move any % signs from start to end of the key
195 # provided there are no others elsewhere in the string
195 # provided there are no others elsewhere in the string
196
196
197 if word[:2] == "%%":
197 if word[:2] == "%%":
198 if not "%" in word[2:]:
198 if not "%" in word[2:]:
199 return word[2:] + "%%"
199 return word[2:] + "%%"
200
200
201 if word[:1] == "%":
201 if word[:1] == "%":
202 if not "%" in word[1:]:
202 if not "%" in word[1:]:
203 return word[1:] + "%"
203 return word[1:] + "%"
204
204
205 return word
205 return word
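# Editorial usage sketch for penalize_magics_key, not part of the diff;
# assumes the module is importable as IPython.core.completer.
from IPython.core.completer import penalize_magics_key

words = ['%timeit', 'timeit', '%%timeit', 'time']
assert sorted(words, key=penalize_magics_key) == \
    ['time', 'timeit', '%timeit', '%%timeit']
# plain names sort first, then the line magic, then the cell magic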
206
206
207
207
208 @undoc
208 @undoc
209 class Bunch(object): pass
209 class Bunch(object): pass
210
210
211
211
212 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
212 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
213 GREEDY_DELIMS = ' =\r\n'
213 GREEDY_DELIMS = ' =\r\n'
214
214
215
215
216 class CompletionSplitter(object):
216 class CompletionSplitter(object):
217 """An object to split an input line in a manner similar to readline.
217 """An object to split an input line in a manner similar to readline.
218
218
219 By having our own implementation, we can expose readline-like completion in
219 By having our own implementation, we can expose readline-like completion in
220 a uniform manner to all frontends. This object only needs to be given the
220 a uniform manner to all frontends. This object only needs to be given the
221 line of text to be split and the cursor position on said line, and it
221 line of text to be split and the cursor position on said line, and it
222 returns the 'word' to be completed on at the cursor after splitting the
222 returns the 'word' to be completed on at the cursor after splitting the
223 entire line.
223 entire line.
224
224
225 What characters are used as splitting delimiters can be controlled by
225 What characters are used as splitting delimiters can be controlled by
226 setting the `delims` attribute (this is a property that internally
226 setting the `delims` attribute (this is a property that internally
227 automatically builds the necessary regular expression)"""
227 automatically builds the necessary regular expression)"""
228
228
229 # Private interface
229 # Private interface
230
230
231 # A string of delimiter characters. The default value makes sense for
231 # A string of delimiter characters. The default value makes sense for
232 # IPython's most typical usage patterns.
232 # IPython's most typical usage patterns.
233 _delims = DELIMS
233 _delims = DELIMS
234
234
235 # The expression (a normal string) to be compiled into a regular expression
235 # The expression (a normal string) to be compiled into a regular expression
236 # for actual splitting. We store it as an attribute mostly for ease of
236 # for actual splitting. We store it as an attribute mostly for ease of
237 # debugging, since this type of code can be so tricky to debug.
237 # debugging, since this type of code can be so tricky to debug.
238 _delim_expr = None
238 _delim_expr = None
239
239
240 # The regular expression that does the actual splitting
240 # The regular expression that does the actual splitting
241 _delim_re = None
241 _delim_re = None
242
242
243 def __init__(self, delims=None):
243 def __init__(self, delims=None):
244 delims = CompletionSplitter._delims if delims is None else delims
244 delims = CompletionSplitter._delims if delims is None else delims
245 self.delims = delims
245 self.delims = delims
246
246
247 @property
247 @property
248 def delims(self):
248 def delims(self):
249 """Return the string of delimiter characters."""
249 """Return the string of delimiter characters."""
250 return self._delims
250 return self._delims
251
251
252 @delims.setter
252 @delims.setter
253 def delims(self, delims):
253 def delims(self, delims):
254 """Set the delimiters for line splitting."""
254 """Set the delimiters for line splitting."""
255 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
255 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
256 self._delim_re = re.compile(expr)
256 self._delim_re = re.compile(expr)
257 self._delims = delims
257 self._delims = delims
258 self._delim_expr = expr
258 self._delim_expr = expr
259
259
260 def split_line(self, line, cursor_pos=None):
260 def split_line(self, line, cursor_pos=None):
261 """Split a line of text with a cursor at the given position.
261 """Split a line of text with a cursor at the given position.
262 """
262 """
263 l = line if cursor_pos is None else line[:cursor_pos]
263 l = line if cursor_pos is None else line[:cursor_pos]
264 return self._delim_re.split(l)[-1]
264 return self._delim_re.split(l)[-1]
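# Editorial usage sketch for CompletionSplitter, not part of the diff;
# assumes the module is importable as IPython.core.completer.
from IPython.core.completer import CompletionSplitter

s = CompletionSplitter()
assert s.split_line('run foo.py') == 'foo.py'   # last delimiter-free chunk
assert s.split_line('a = np.arange(10); np.si') == 'np.si'
# cursor_pos limits the split to the text left of the cursor:
assert s.split_line('echo hello world', cursor_pos=10) == 'hello'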
265
265
266
266
267 class Completer(Configurable):
267 class Completer(Configurable):
268
268
269 greedy = CBool(False, config=True,
269 greedy = CBool(False, config=True,
270 help="""Activate greedy completion
270 help="""Activate greedy completion
271
271
272 This will enable completion on elements of lists, results of function calls, etc.,
272 This will enable completion on elements of lists, results of function calls, etc.,
273 but can be unsafe because the code is actually evaluated on TAB.
273 but can be unsafe because the code is actually evaluated on TAB.
274 """
274 """
275 )
275 )
276
276
277
277
278 def __init__(self, namespace=None, global_namespace=None, **kwargs):
278 def __init__(self, namespace=None, global_namespace=None, **kwargs):
279 """Create a new completer for the command line.
279 """Create a new completer for the command line.
280
280
281 Completer(namespace=ns,global_namespace=ns2) -> completer instance.
281 Completer(namespace=ns,global_namespace=ns2) -> completer instance.
282
282
283 If unspecified, the default namespace where completions are performed
283 If unspecified, the default namespace where completions are performed
284 is __main__ (technically, __main__.__dict__). Namespaces should be
284 is __main__ (technically, __main__.__dict__). Namespaces should be
285 given as dictionaries.
285 given as dictionaries.
286
286
287 An optional second namespace can be given. This allows the completer
287 An optional second namespace can be given. This allows the completer
288 to handle cases where both the local and global scopes need to be
288 to handle cases where both the local and global scopes need to be
289 distinguished.
289 distinguished.
290
290
291 Completer instances should be used as the completion mechanism of
291 Completer instances should be used as the completion mechanism of
292 readline via the set_completer() call:
292 readline via the set_completer() call:
293
293
294 readline.set_completer(Completer(my_namespace).complete)
294 readline.set_completer(Completer(my_namespace).complete)
295 """
295 """
296
296
297 # Don't bind to namespace quite yet, but flag whether the user wants a
297 # Don't bind to namespace quite yet, but flag whether the user wants a
298 # specific namespace or to use __main__.__dict__. This will allow us
298 # specific namespace or to use __main__.__dict__. This will allow us
299 # to bind to __main__.__dict__ at completion time, not now.
299 # to bind to __main__.__dict__ at completion time, not now.
300 if namespace is None:
300 if namespace is None:
301 self.use_main_ns = 1
301 self.use_main_ns = 1
302 else:
302 else:
303 self.use_main_ns = 0
303 self.use_main_ns = 0
304 self.namespace = namespace
304 self.namespace = namespace
305
305
306 # The global namespace, if given, can be bound directly
306 # The global namespace, if given, can be bound directly
307 if global_namespace is None:
307 if global_namespace is None:
308 self.global_namespace = {}
308 self.global_namespace = {}
309 else:
309 else:
310 self.global_namespace = global_namespace
310 self.global_namespace = global_namespace
311
311
312 super(Completer, self).__init__(**kwargs)
312 super(Completer, self).__init__(**kwargs)
313
313
314 def complete(self, text, state):
314 def complete(self, text, state):
315 """Return the next possible completion for 'text'.
315 """Return the next possible completion for 'text'.
316
316
317 This is called successively with state == 0, 1, 2, ... until it
317 This is called successively with state == 0, 1, 2, ... until it
318 returns None. The completion should begin with 'text'.
318 returns None. The completion should begin with 'text'.
319
319
320 """
320 """
321 if self.use_main_ns:
321 if self.use_main_ns:
322 self.namespace = __main__.__dict__
322 self.namespace = __main__.__dict__
323
323
324 if state == 0:
324 if state == 0:
325 if "." in text:
325 if "." in text:
326 self.matches = self.attr_matches(text)
326 self.matches = self.attr_matches(text)
327 else:
327 else:
328 self.matches = self.global_matches(text)
328 self.matches = self.global_matches(text)
329 try:
329 try:
330 return self.matches[state]
330 return self.matches[state]
331 except IndexError:
331 except IndexError:
332 return None
332 return None
333
333
334 def global_matches(self, text):
334 def global_matches(self, text):
335 """Compute matches when text is a simple name.
335 """Compute matches when text is a simple name.
336
336
337 Return a list of all keywords, built-in functions and names currently
337 Return a list of all keywords, built-in functions and names currently
338 defined in self.namespace or self.global_namespace that match.
338 defined in self.namespace or self.global_namespace that match.
339
339
340 """
340 """
341 #print 'Completer->global_matches, txt=%r' % text # dbg
341 #print 'Completer->global_matches, txt=%r' % text # dbg
342 matches = []
342 matches = []
343 match_append = matches.append
343 match_append = matches.append
344 n = len(text)
344 n = len(text)
345 for lst in [keyword.kwlist,
345 for lst in [keyword.kwlist,
346 builtin_mod.__dict__.keys(),
346 builtin_mod.__dict__.keys(),
347 self.namespace.keys(),
347 self.namespace.keys(),
348 self.global_namespace.keys()]:
348 self.global_namespace.keys()]:
349 for word in lst:
349 for word in lst:
350 if word[:n] == text and word != "__builtins__":
350 if word[:n] == text and word != "__builtins__":
351 match_append(word)
351 match_append(word)
352 return matches
352 return matches
353
353
354 def attr_matches(self, text):
354 def attr_matches(self, text):
355 """Compute matches when text contains a dot.
355 """Compute matches when text contains a dot.
356
356
357 Assuming the text is of the form NAME.NAME....[NAME], and is
357 Assuming the text is of the form NAME.NAME....[NAME], and is
358 evaluatable in self.namespace or self.global_namespace, it will be
358 evaluatable in self.namespace or self.global_namespace, it will be
359 evaluated and its attributes (as revealed by dir()) are used as
359 evaluated and its attributes (as revealed by dir()) are used as
360 possible completions. (For class instances, class members are
360 possible completions. (For class instances, class members are
361 also considered.)
361 also considered.)
362
362
363 WARNING: this can still invoke arbitrary C code, if an object
363 WARNING: this can still invoke arbitrary C code, if an object
364 with a __getattr__ hook is evaluated.
364 with a __getattr__ hook is evaluated.
365
365
366 """
366 """
367
367
368 #io.rprint('Completer->attr_matches, txt=%r' % text) # dbg
368 #io.rprint('Completer->attr_matches, txt=%r' % text) # dbg
369 # Another option, seems to work great. Catches things like ''.<tab>
369 # Another option, seems to work great. Catches things like ''.<tab>
370 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
370 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
371
371
372 if m:
372 if m:
373 expr, attr = m.group(1, 3)
373 expr, attr = m.group(1, 3)
374 elif self.greedy:
374 elif self.greedy:
375 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
375 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
376 if not m2:
376 if not m2:
377 return []
377 return []
378 expr, attr = m2.group(1,2)
378 expr, attr = m2.group(1,2)
379 else:
379 else:
380 return []
380 return []
381
381
382 try:
382 try:
383 obj = eval(expr, self.namespace)
383 obj = eval(expr, self.namespace)
384 except:
384 except:
385 try:
385 try:
386 obj = eval(expr, self.global_namespace)
386 obj = eval(expr, self.global_namespace)
387 except:
387 except:
388 return []
388 return []
389
389
390 if self.limit_to__all__ and hasattr(obj, '__all__'):
390 if self.limit_to__all__ and hasattr(obj, '__all__'):
391 words = get__all__entries(obj)
391 words = get__all__entries(obj)
392 else:
392 else:
393 words = dir2(obj)
393 words = dir2(obj)
394
394
395 try:
395 try:
396 words = generics.complete_object(obj, words)
396 words = generics.complete_object(obj, words)
397 except TryNext:
397 except TryNext:
398 pass
398 pass
399 except Exception:
399 except Exception:
400 # Silence errors from completion function
400 # Silence errors from completion function
401 #raise # dbg
401 #raise # dbg
402 pass
402 pass
403 # Build match list to return
403 # Build match list to return
404 n = len(attr)
404 n = len(attr)
405 res = ["%s.%s" % (expr, w) for w in words if w[:n] == attr ]
405 res = ["%s.%s" % (expr, w) for w in words if w[:n] == attr ]
406 return res
406 return res
407
407
408
408
409 def get__all__entries(obj):
409 def get__all__entries(obj):
410 """returns the strings in the __all__ attribute"""
410 """returns the strings in the __all__ attribute"""
411 try:
411 try:
412 words = getattr(obj, '__all__')
412 words = getattr(obj, '__all__')
413 except:
413 except:
414 return []
414 return []
415
415
416 return [w for w in words if isinstance(w, string_types)]
416 return [w for w in words if isinstance(w, string_types)]
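# Editorial usage sketch for get__all__entries, not part of the diff;
# assumes the module is importable as IPython.core.completer.
import string

from IPython.core.completer import get__all__entries

print(get__all__entries(string))    # the string-valued names in string.__all__
print(get__all__entries(object()))  # no __all__ attribute -> []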
417
417
418
418
419 def match_dict_keys(keys, prefix):
419 def match_dict_keys(keys, prefix):
420 """Used by dict_key_matches, matching the prefix to a list of keys"""
420 """Used by dict_key_matches, matching the prefix to a list of keys"""
421 if not prefix:
421 if not prefix:
422 return None, 0, [repr(k) for k in keys
422 return None, 0, [repr(k) for k in keys
423 if isinstance(k, (string_types, bytes))]
423 if isinstance(k, (string_types, bytes))]
424 quote_match = re.search('["\']', prefix)
424 quote_match = re.search('["\']', prefix)
425 quote = quote_match.group()
425 quote = quote_match.group()
426 try:
426 try:
427 prefix_str = eval(prefix + quote, {})
427 prefix_str = eval(prefix + quote, {})
428 except Exception:
428 except Exception:
429 return None, 0, []
429 return None, 0, []
430
430
431 token_match = re.search(r'\w*$', prefix, re.UNICODE)
431 token_match = re.search(r'\w*$', prefix, re.UNICODE)
432 token_start = token_match.start()
432 token_start = token_match.start()
433 token_prefix = token_match.group()
433 token_prefix = token_match.group()
434
434
435 # TODO: support bytes in Py3k
435 # TODO: support bytes in Py3k
436 matched = []
436 matched = []
437 for key in keys:
437 for key in keys:
438 try:
438 try:
439 if not key.startswith(prefix_str):
439 if not key.startswith(prefix_str):
440 continue
440 continue
441 except (AttributeError, TypeError, UnicodeError):
441 except (AttributeError, TypeError, UnicodeError):
442 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
442 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
443 continue
443 continue
444
444
445 # reformat remainder of key to begin with prefix
445 # reformat remainder of key to begin with prefix
446 rem = key[len(prefix_str):]
446 rem = key[len(prefix_str):]
447 # force repr wrapped in '
447 # force repr wrapped in '
448 rem_repr = repr(rem + '"')
448 rem_repr = repr(rem + '"')
449 if rem_repr.startswith('u') and prefix[0] not in 'uU':
449 if rem_repr.startswith('u') and prefix[0] not in 'uU':
450 # Found key is unicode, but prefix is Py2 string.
450 # Found key is unicode, but prefix is Py2 string.
451 # Therefore attempt to interpret key as string.
451 # Therefore attempt to interpret key as string.
452 try:
452 try:
453 rem_repr = repr(rem.encode('ascii') + '"')
453 rem_repr = repr(rem.encode('ascii') + '"')
454 except UnicodeEncodeError:
454 except UnicodeEncodeError:
455 continue
455 continue
456
456
457 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
457 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
458 if quote == '"':
458 if quote == '"':
459 # The entered prefix is quoted with ",
459 # The entered prefix is quoted with ",
460 # but the match is quoted with '.
460 # but the match is quoted with '.
461 # A contained " hence needs escaping for comparison:
461 # A contained " hence needs escaping for comparison:
462 rem_repr = rem_repr.replace('"', '\\"')
462 rem_repr = rem_repr.replace('"', '\\"')
463
463
464 # then reinsert prefix from start of token
464 # then reinsert prefix from start of token
465 matched.append('%s%s' % (token_prefix, rem_repr))
465 matched.append('%s%s' % (token_prefix, rem_repr))
466 return quote, token_start, matched
466 return quote, token_start, matched
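# Editorial usage sketch for match_dict_keys, not part of the diff;
# assumes the module is importable as IPython.core.completer.
from IPython.core.completer import match_dict_keys

keys = ['alpha', 'beta', 'alpha beta']
# No prefix typed yet: every string key is offered, repr-quoted.
print(match_dict_keys(keys, ''))     # (None, 0, ["'alpha'", "'beta'", "'alpha beta'"])
# A partially typed quoted prefix narrows the matches; the returned quote and
# token offset tell the caller how to splice the completion into the line.
print(match_dict_keys(keys, "'al"))  # roughly ("'", 1, ['alpha', 'alpha beta'])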
467
467
468
468
469 def _safe_isinstance(obj, module, class_name):
469 def _safe_isinstance(obj, module, class_name):
470 """Checks if obj is an instance of module.class_name if loaded
470 """Checks if obj is an instance of module.class_name if loaded
471 """
471 """
472 return (module in sys.modules and
472 return (module in sys.modules and
473 isinstance(obj, getattr(__import__(module), class_name)))
473 isinstance(obj, getattr(__import__(module), class_name)))
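# Editorial usage sketch for _safe_isinstance, not part of the diff;
# assumes the module is importable as IPython.core.completer.
from IPython.core.completer import _safe_isinstance

# True only when the named module is already in sys.modules *and* obj is an
# instance of module.class_name; it never imports a module that isn't loaded.
print(_safe_isinstance({}, 'builtins', 'dict'))  # True on Python 3
print(_safe_isinstance([], 'numpy', 'ndarray'))  # False whether or not numpy is loaded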
474
474
475
475
476
476
477 class IPCompleter(Completer):
477 class IPCompleter(Completer):
478 """Extension of the completer class with IPython-specific features"""
478 """Extension of the completer class with IPython-specific features"""
479
479
480 def _greedy_changed(self, name, old, new):
480 def _greedy_changed(self, name, old, new):
481 """update the splitter and readline delims when greedy is changed"""
481 """update the splitter and readline delims when greedy is changed"""
482 if new:
482 if new:
483 self.splitter.delims = GREEDY_DELIMS
483 self.splitter.delims = GREEDY_DELIMS
484 else:
484 else:
485 self.splitter.delims = DELIMS
485 self.splitter.delims = DELIMS
486
486
487 if self.readline:
487 if self.readline:
488 self.readline.set_completer_delims(self.splitter.delims)
488 self.readline.set_completer_delims(self.splitter.delims)
489
489
490 merge_completions = CBool(True, config=True,
490 merge_completions = CBool(True, config=True,
491 help="""Whether to merge completion results into a single list
491 help="""Whether to merge completion results into a single list
492
492
493 If False, only the completion results from the first non-empty
493 If False, only the completion results from the first non-empty
494 completer will be returned.
494 completer will be returned.
495 """
495 """
496 )
496 )
497 omit__names = Enum((0,1,2), default_value=2, config=True,
497 omit__names = Enum((0,1,2), default_value=2, config=True,
498 help="""Instruct the completer to omit private method names
498 help="""Instruct the completer to omit private method names
499
499
500 Specifically, when completing on ``object.<tab>``.
500 Specifically, when completing on ``object.<tab>``.
501
501
502 When 2 [default]: all names that start with '_' will be excluded.
502 When 2 [default]: all names that start with '_' will be excluded.
503
503
504 When 1: all 'magic' names (``__foo__``) will be excluded.
504 When 1: all 'magic' names (``__foo__``) will be excluded.
505
505
506 When 0: nothing will be excluded.
506 When 0: nothing will be excluded.
507 """
507 """
508 )
508 )
509 limit_to__all__ = CBool(default_value=False, config=True,
509 limit_to__all__ = CBool(default_value=False, config=True,
510 help="""Instruct the completer to use __all__ for the completion
510 help="""Instruct the completer to use __all__ for the completion
511
511
512 Specifically, when completing on ``object.<tab>``.
512 Specifically, when completing on ``object.<tab>``.
513
513
514 When True: only those names in obj.__all__ will be included.
514 When True: only those names in obj.__all__ will be included.
515
515
516 When False [default]: the __all__ attribute is ignored
516 When False [default]: the __all__ attribute is ignored
517 """
517 """
518 )
518 )
519
519
520 def __init__(self, shell=None, namespace=None, global_namespace=None,
520 def __init__(self, shell=None, namespace=None, global_namespace=None,
521 use_readline=True, config=None, **kwargs):
521 use_readline=True, config=None, **kwargs):
522 """IPCompleter() -> completer
522 """IPCompleter() -> completer
523
523
524 Return a completer object suitable for use by the readline library
524 Return a completer object suitable for use by the readline library
525 via readline.set_completer().
525 via readline.set_completer().
526
526
527 Inputs:
527 Inputs:
528
528
529 - shell: a pointer to the ipython shell itself. This is needed
529 - shell: a pointer to the ipython shell itself. This is needed
530 because this completer knows about magic functions, and those can
530 because this completer knows about magic functions, and those can
531 only be accessed via the ipython instance.
531 only be accessed via the ipython instance.
532
532
533 - namespace: an optional dict where completions are performed.
533 - namespace: an optional dict where completions are performed.
534
534
535 - global_namespace: secondary optional dict for completions, to
535 - global_namespace: secondary optional dict for completions, to
536 handle cases (such as IPython embedded inside functions) where
536 handle cases (such as IPython embedded inside functions) where
537 both Python scopes are visible.
537 both Python scopes are visible.
538
538
539 use_readline : bool, optional
539 use_readline : bool, optional
540 If true, use the readline library. This completer can still function
540 If true, use the readline library. This completer can still function
541 without readline, though in that case callers must provide some extra
541 without readline, though in that case callers must provide some extra
542 information on each call about the current line."""
542 information on each call about the current line."""
543
543
544 self.magic_escape = ESC_MAGIC
544 self.magic_escape = ESC_MAGIC
545 self.splitter = CompletionSplitter()
545 self.splitter = CompletionSplitter()
546
546
547 # Readline configuration, only used by the rlcompleter method.
547 # Readline configuration, only used by the rlcompleter method.
548 if use_readline:
548 if use_readline:
549 # We store the right version of readline so that later code can use it
549 # We store the right version of readline so that later code can use it
550 import IPython.utils.rlineimpl as readline
550 import IPython.utils.rlineimpl as readline
551 self.readline = readline
551 self.readline = readline
552 else:
552 else:
553 self.readline = None
553 self.readline = None
554
554
555 # _greedy_changed() depends on splitter and readline being defined:
555 # _greedy_changed() depends on splitter and readline being defined:
556 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
556 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
557 config=config, **kwargs)
557 config=config, **kwargs)
558
558
559 # List where completion matches will be stored
559 # List where completion matches will be stored
560 self.matches = []
560 self.matches = []
561 self.shell = shell
561 self.shell = shell
562 # Regexp to split filenames with spaces in them
562 # Regexp to split filenames with spaces in them
563 self.space_name_re = re.compile(r'([^\\] )')
563 self.space_name_re = re.compile(r'([^\\] )')
564 # Hold a local ref. to glob.glob for speed
564 # Hold a local ref. to glob.glob for speed
565 self.glob = glob.glob
565 self.glob = glob.glob
566
566
567 # Determine if we are running on 'dumb' terminals, like (X)Emacs
567 # Determine if we are running on 'dumb' terminals, like (X)Emacs
568 # buffers, to avoid completion problems.
568 # buffers, to avoid completion problems.
569 term = os.environ.get('TERM','xterm')
569 term = os.environ.get('TERM','xterm')
570 self.dumb_terminal = term in ['dumb','emacs']
570 self.dumb_terminal = term in ['dumb','emacs']
571
571
572 # Special handling of backslashes needed in win32 platforms
572 # Special handling of backslashes needed in win32 platforms
573 if sys.platform == "win32":
573 if sys.platform == "win32":
574 self.clean_glob = self._clean_glob_win32
574 self.clean_glob = self._clean_glob_win32
575 else:
575 else:
576 self.clean_glob = self._clean_glob
576 self.clean_glob = self._clean_glob
577
577
578 #regexp to parse docstring for function signature
578 #regexp to parse docstring for function signature
579 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
579 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
580 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
580 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
581 #use this if positional argument name is also needed
581 #use this if positional argument name is also needed
582 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
582 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
583
583
584 # All active matcher routines for completion
584 # All active matcher routines for completion
585 self.matchers = [self.python_matches,
585 self.matchers = [self.python_matches,
586 self.file_matches,
586 self.file_matches,
587 self.magic_matches,
587 self.magic_matches,
588 self.python_func_kw_matches,
588 self.python_func_kw_matches,
589 self.dict_key_matches,
589 self.dict_key_matches,
590 ]
590 ]
591
591
592 def all_completions(self, text):
592 def all_completions(self, text):
593 """
593 """
594 Wrapper around the complete method for the benefit of emacs
594 Wrapper around the complete method for the benefit of emacs
595 and pydb.
595 and pydb.
596 """
596 """
597 return self.complete(text)[1]
597 return self.complete(text)[1]
598
598
599 def _clean_glob(self,text):
599 def _clean_glob(self,text):
600 return self.glob("%s*" % text)
600 return self.glob("%s*" % text)
601
601
602 def _clean_glob_win32(self,text):
602 def _clean_glob_win32(self,text):
603 return [f.replace("\\","/")
603 return [f.replace("\\","/")
604 for f in self.glob("%s*" % text)]
604 for f in self.glob("%s*" % text)]
605
605
606 def file_matches(self, text):
606 def file_matches(self, text):
607 """Match filenames, expanding ~USER type strings.
607 """Match filenames, expanding ~USER type strings.
608
608
609 Most of the seemingly convoluted logic in this completer is an
609 Most of the seemingly convoluted logic in this completer is an
610 attempt to handle filenames with spaces in them. And yet it's not
610 attempt to handle filenames with spaces in them. And yet it's not
611 quite perfect, because Python's readline doesn't expose all of the
611 quite perfect, because Python's readline doesn't expose all of the
612 GNU readline details needed for this to be done correctly.
612 GNU readline details needed for this to be done correctly.
613
613
614 For a filename with a space in it, the printed completions will be
614 For a filename with a space in it, the printed completions will be
615 only the parts after what's already been typed (instead of the
615 only the parts after what's already been typed (instead of the
616 full completions, as is normally done). I don't think with the
616 full completions, as is normally done). I don't think with the
617 current (as of Python 2.3) Python readline it's possible to do
617 current (as of Python 2.3) Python readline it's possible to do
618 better."""
618 better."""
619
619
620 #io.rprint('Completer->file_matches: <%r>' % text) # dbg
620 #io.rprint('Completer->file_matches: <%r>' % text) # dbg
621
621
622 # chars that require escaping with backslash - i.e. chars
622 # chars that require escaping with backslash - i.e. chars
623 # that readline treats incorrectly as delimiters, but we
623 # that readline treats incorrectly as delimiters, but we
624 # don't want to treat as delimiters in filename matching
624 # don't want to treat as delimiters in filename matching
625 # when escaped with backslash
625 # when escaped with backslash
626 if text.startswith('!'):
626 if text.startswith('!'):
627 text = text[1:]
627 text = text[1:]
628 text_prefix = '!'
628 text_prefix = '!'
629 else:
629 else:
630 text_prefix = ''
630 text_prefix = ''
631
631
632 text_until_cursor = self.text_until_cursor
632 text_until_cursor = self.text_until_cursor
633 # track strings with open quotes
633 # track strings with open quotes
634 open_quotes = has_open_quotes(text_until_cursor)
634 open_quotes = has_open_quotes(text_until_cursor)
635
635
636 if '(' in text_until_cursor or '[' in text_until_cursor:
636 if '(' in text_until_cursor or '[' in text_until_cursor:
637 lsplit = text
637 lsplit = text
638 else:
638 else:
639 try:
639 try:
640 # arg_split ~ shlex.split, but with unicode bugs fixed by us
640 # arg_split ~ shlex.split, but with unicode bugs fixed by us
641 lsplit = arg_split(text_until_cursor)[-1]
641 lsplit = arg_split(text_until_cursor)[-1]
642 except ValueError:
642 except ValueError:
643 # typically an unmatched ", or backslash without escaped char.
643 # typically an unmatched ", or backslash without escaped char.
644 if open_quotes:
644 if open_quotes:
645 lsplit = text_until_cursor.split(open_quotes)[-1]
645 lsplit = text_until_cursor.split(open_quotes)[-1]
646 else:
646 else:
647 return []
647 return []
648 except IndexError:
648 except IndexError:
649 # tab pressed on empty line
649 # tab pressed on empty line
650 lsplit = ""
650 lsplit = ""
651
651
652 if not open_quotes and lsplit != protect_filename(lsplit):
652 if not open_quotes and lsplit != protect_filename(lsplit):
653 # if protectables are found, do matching on the whole escaped name
653 # if protectables are found, do matching on the whole escaped name
654 has_protectables = True
654 has_protectables = True
655 text0,text = text,lsplit
655 text0,text = text,lsplit
656 else:
656 else:
657 has_protectables = False
657 has_protectables = False
658 text = os.path.expanduser(text)
658 text = os.path.expanduser(text)
659
659
660 if text == "":
660 if text == "":
661 return [text_prefix + protect_filename(f) for f in self.glob("*")]
661 return [text_prefix + protect_filename(f) for f in self.glob("*")]
662
662
663 # Compute the matches from the filesystem
663 # Compute the matches from the filesystem
664 m0 = self.clean_glob(text.replace('\\',''))
664 m0 = self.clean_glob(text.replace('\\',''))
665
665
666 if has_protectables:
666 if has_protectables:
667 # If we had protectables, we need to revert our changes to the
667 # If we had protectables, we need to revert our changes to the
668 # beginning of filename so that we don't double-write the part
668 # beginning of filename so that we don't double-write the part
669 # of the filename we have so far
669 # of the filename we have so far
670 len_lsplit = len(lsplit)
670 len_lsplit = len(lsplit)
671 matches = [text_prefix + text0 +
671 matches = [text_prefix + text0 +
672 protect_filename(f[len_lsplit:]) for f in m0]
672 protect_filename(f[len_lsplit:]) for f in m0]
673 else:
673 else:
674 if open_quotes:
674 if open_quotes:
675 # if we have a string with an open quote, we don't need to
675 # if we have a string with an open quote, we don't need to
676 # protect the names at all (and we _shouldn't_, as it
676 # protect the names at all (and we _shouldn't_, as it
677 # would cause bugs when the filesystem call is made).
677 # would cause bugs when the filesystem call is made).
678 matches = m0
678 matches = m0
679 else:
679 else:
680 matches = [text_prefix +
680 matches = [text_prefix +
681 protect_filename(f) for f in m0]
681 protect_filename(f) for f in m0]
682
682
683 #io.rprint('mm', matches) # dbg
683 #io.rprint('mm', matches) # dbg
684
684
685 # Mark directories in input list by appending '/' to their names.
685 # Mark directories in input list by appending '/' to their names.
686 matches = [x+'/' if os.path.isdir(x) else x for x in matches]
686 matches = [x+'/' if os.path.isdir(x) else x for x in matches]
687 return matches
687 return matches
688
688
689 def magic_matches(self, text):
689 def magic_matches(self, text):
690 """Match magics"""
690 """Match magics"""
691 #print 'Completer->magic_matches:',text,'lb',self.text_until_cursor # dbg
691 #print 'Completer->magic_matches:',text,'lb',self.text_until_cursor # dbg
692 # Get all shell magics now rather than statically, so magics loaded at
692 # Get all shell magics now rather than statically, so magics loaded at
693 # runtime show up too.
693 # runtime show up too.
694 lsm = self.shell.magics_manager.lsmagic()
694 lsm = self.shell.magics_manager.lsmagic()
695 line_magics = lsm['line']
695 line_magics = lsm['line']
696 cell_magics = lsm['cell']
696 cell_magics = lsm['cell']
697 pre = self.magic_escape
697 pre = self.magic_escape
698 pre2 = pre+pre
698 pre2 = pre+pre
699
699
700 # Completion logic:
700 # Completion logic:
701 # - user gives %%: only do cell magics
701 # - user gives %%: only do cell magics
702 # - user gives %: do both line and cell magics
702 # - user gives %: do both line and cell magics
703 # - no prefix: do both
703 # - no prefix: do both
704 # In other words, line magics are skipped if the user gives %% explicitly
704 # In other words, line magics are skipped if the user gives %% explicitly
705 bare_text = text.lstrip(pre)
705 bare_text = text.lstrip(pre)
706 comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
706 comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
707 if not text.startswith(pre2):
707 if not text.startswith(pre2):
708 comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
708 comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
709 return comp
709 return comp
710
710
711 def python_matches(self,text):
711 def python_matches(self,text):
712 """Match attributes or global python names"""
712 """Match attributes or global python names"""
713
713
714 #io.rprint('Completer->python_matches, txt=%r' % text) # dbg
714 #io.rprint('Completer->python_matches, txt=%r' % text) # dbg
715 if "." in text:
715 if "." in text:
716 try:
716 try:
717 matches = self.attr_matches(text)
717 matches = self.attr_matches(text)
718 if text.endswith('.') and self.omit__names:
718 if text.endswith('.') and self.omit__names:
719 if self.omit__names == 1:
719 if self.omit__names == 1:
720 # true if txt is _not_ a __ name, false otherwise:
720 # true if txt is _not_ a __ name, false otherwise:
721 no__name = (lambda txt:
721 no__name = (lambda txt:
722 re.match(r'.*\.__.*?__',txt) is None)
722 re.match(r'.*\.__.*?__',txt) is None)
723 else:
723 else:
724 # true if txt is _not_ a _ name, false otherwise:
724 # true if txt is _not_ a _ name, false otherwise:
725 no__name = (lambda txt:
725 no__name = (lambda txt:
726 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
726 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
727 matches = filter(no__name, matches)
727 matches = filter(no__name, matches)
728 except NameError:
728 except NameError:
729 # catches <undefined attributes>.<tab>
729 # catches <undefined attributes>.<tab>
730 matches = []
730 matches = []
731 else:
731 else:
732 matches = self.global_matches(text)
732 matches = self.global_matches(text)
733
733
734 return matches
734 return matches
735
735
736 def _default_arguments_from_docstring(self, doc):
736 def _default_arguments_from_docstring(self, doc):
737 """Parse the first line of docstring for call signature.
737 """Parse the first line of docstring for call signature.
738
738
739 Docstring should be of the form 'min(iterable[, key=func])\n'.
739 Docstring should be of the form 'min(iterable[, key=func])\n'.
740 It can also parse a cython docstring of the form
740 It can also parse a cython docstring of the form
741 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
741 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
742 """
742 """
743 if doc is None:
743 if doc is None:
744 return []
744 return []
745
745
746 # we only care about the first line
746 # we only care about the first line
747 line = doc.lstrip().splitlines()[0]
747 line = doc.lstrip().splitlines()[0]
748
748
749 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
749 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
750 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
750 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
751 sig = self.docstring_sig_re.search(line)
751 sig = self.docstring_sig_re.search(line)
752 if sig is None:
752 if sig is None:
753 return []
753 return []
754 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
754 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
755 sig = sig.groups()[0].split(',')
755 sig = sig.groups()[0].split(',')
756 ret = []
756 ret = []
757 for s in sig:
757 for s in sig:
758 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
758 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
759 ret += self.docstring_kwd_re.findall(s)
759 ret += self.docstring_kwd_re.findall(s)
760 return ret
760 return ret
761
761
762 def _default_arguments(self, obj):
762 def _default_arguments(self, obj):
763 """Return the list of default arguments of obj if it is callable,
763 """Return the list of default arguments of obj if it is callable,
764 or empty list otherwise."""
764 or empty list otherwise."""
765 call_obj = obj
765 call_obj = obj
766 ret = []
766 ret = []
767 if inspect.isbuiltin(obj):
767 if inspect.isbuiltin(obj):
768 pass
768 pass
769 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
769 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
770 if inspect.isclass(obj):
770 if inspect.isclass(obj):
771 #for cython embedsignature=True the constructor docstring
771 #for cython embedsignature=True the constructor docstring
772 #belongs to the object itself not __init__
772 #belongs to the object itself not __init__
773 ret += self._default_arguments_from_docstring(
773 ret += self._default_arguments_from_docstring(
774 getattr(obj, '__doc__', ''))
774 getattr(obj, '__doc__', ''))
775 # for classes, check for __init__,__new__
775 # for classes, check for __init__,__new__
776 call_obj = (getattr(obj, '__init__', None) or
776 call_obj = (getattr(obj, '__init__', None) or
777 getattr(obj, '__new__', None))
777 getattr(obj, '__new__', None))
778 # for all others, check if they are __call__able
778 # for all others, check if they are __call__able
779 elif hasattr(obj, '__call__'):
779 elif hasattr(obj, '__call__'):
780 call_obj = obj.__call__
780 call_obj = obj.__call__
781
781
782 ret += self._default_arguments_from_docstring(
782 ret += self._default_arguments_from_docstring(
783 getattr(call_obj, '__doc__', ''))
783 getattr(call_obj, '__doc__', ''))
784
784
785 try:
785 try:
786 args,_,_1,defaults = inspect.getargspec(call_obj)
786 args,_,_1,defaults = inspect.getargspec(call_obj)
787 if defaults:
787 if defaults:
788 ret+=args[-len(defaults):]
788 ret+=args[-len(defaults):]
789 except TypeError:
789 except TypeError:
790 pass
790 pass
791
791
792 return list(set(ret))
792 return list(set(ret))
793
793
794 def python_func_kw_matches(self,text):
794 def python_func_kw_matches(self,text):
795 """Match named parameters (kwargs) of the last open function"""
795 """Match named parameters (kwargs) of the last open function"""
796
796
797 if "." in text: # a parameter cannot be dotted
797 if "." in text: # a parameter cannot be dotted
798 return []
798 return []
799 try: regexp = self.__funcParamsRegex
799 try: regexp = self.__funcParamsRegex
800 except AttributeError:
800 except AttributeError:
801 regexp = self.__funcParamsRegex = re.compile(r'''
801 regexp = self.__funcParamsRegex = re.compile(r'''
802 '.*?(?<!\\)' | # single quoted strings or
802 '.*?(?<!\\)' | # single quoted strings or
803 ".*?(?<!\\)" | # double quoted strings or
803 ".*?(?<!\\)" | # double quoted strings or
804 \w+ | # identifier
804 \w+ | # identifier
805 \S # other characters
805 \S # other characters
806 ''', re.VERBOSE | re.DOTALL)
806 ''', re.VERBOSE | re.DOTALL)
807 # 1. find the nearest identifier that comes before an unclosed
807 # 1. find the nearest identifier that comes before an unclosed
808 # parenthesis before the cursor
808 # parenthesis before the cursor
809 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
809 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
810 tokens = regexp.findall(self.text_until_cursor)
810 tokens = regexp.findall(self.text_until_cursor)
811 tokens.reverse()
811 tokens.reverse()
812 iterTokens = iter(tokens); openPar = 0
812 iterTokens = iter(tokens); openPar = 0
813
813
814 for token in iterTokens:
814 for token in iterTokens:
815 if token == ')':
815 if token == ')':
816 openPar -= 1
816 openPar -= 1
817 elif token == '(':
817 elif token == '(':
818 openPar += 1
818 openPar += 1
819 if openPar > 0:
819 if openPar > 0:
820 # found the last unclosed parenthesis
820 # found the last unclosed parenthesis
821 break
821 break
822 else:
822 else:
823 return []
823 return []
824 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
824 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
825 ids = []
825 ids = []
826 isId = re.compile(r'\w+$').match
826 isId = re.compile(r'\w+$').match
827
827
828 while True:
828 while True:
829 try:
829 try:
830 ids.append(next(iterTokens))
830 ids.append(next(iterTokens))
831 if not isId(ids[-1]):
831 if not isId(ids[-1]):
832 ids.pop(); break
832 ids.pop(); break
833 if not next(iterTokens) == '.':
833 if not next(iterTokens) == '.':
834 break
834 break
835 except StopIteration:
835 except StopIteration:
836 break
836 break
837 # lookup the candidate callable matches either using global_matches
837 # lookup the candidate callable matches either using global_matches
838 # or attr_matches for dotted names
838 # or attr_matches for dotted names
839 if len(ids) == 1:
839 if len(ids) == 1:
840 callableMatches = self.global_matches(ids[0])
840 callableMatches = self.global_matches(ids[0])
841 else:
841 else:
842 callableMatches = self.attr_matches('.'.join(ids[::-1]))
842 callableMatches = self.attr_matches('.'.join(ids[::-1]))
843 argMatches = []
843 argMatches = []
844 for callableMatch in callableMatches:
844 for callableMatch in callableMatches:
845 try:
845 try:
846 namedArgs = self._default_arguments(eval(callableMatch,
846 namedArgs = self._default_arguments(eval(callableMatch,
847 self.namespace))
847 self.namespace))
848 except:
848 except:
849 continue
849 continue
850
850
851 for namedArg in namedArgs:
851 for namedArg in namedArgs:
852 if namedArg.startswith(text):
852 if namedArg.startswith(text):
853 argMatches.append("%s=" %namedArg)
853 argMatches.append("%s=" %namedArg)
854 return argMatches
854 return argMatches
855
855
856 def dict_key_matches(self, text):
856 def dict_key_matches(self, text):
857 "Match string keys in a dictionary, after e.g. 'foo[' "
857 "Match string keys in a dictionary, after e.g. 'foo[' "
858 def get_keys(obj):
858 def get_keys(obj):
859 # Only allow completion for known in-memory dict-like types
859 # Only allow completion for known in-memory dict-like types
860 if isinstance(obj, dict) or\
860 if isinstance(obj, dict) or\
861 _safe_isinstance(obj, 'pandas', 'DataFrame'):
861 _safe_isinstance(obj, 'pandas', 'DataFrame'):
862 try:
862 try:
863 return list(obj.keys())
863 return list(obj.keys())
864 except Exception:
864 except Exception:
865 return []
865 return []
866 elif _safe_isinstance(obj, 'numpy', 'ndarray'):
866 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
867 _safe_isinstance(obj, 'numpy', 'void'):
867 return obj.dtype.names or []
868 return obj.dtype.names or []
868 return []
869 return []
869
870
870 try:
871 try:
871 regexps = self.__dict_key_regexps
872 regexps = self.__dict_key_regexps
872 except AttributeError:
873 except AttributeError:
873 dict_key_re_fmt = r'''(?x)
874 dict_key_re_fmt = r'''(?x)
874 ( # match dict-referring expression wrt greedy setting
875 ( # match dict-referring expression wrt greedy setting
875 %s
876 %s
876 )
877 )
877 \[ # open bracket
878 \[ # open bracket
878 \s* # and optional whitespace
879 \s* # and optional whitespace
879 ([uUbB]? # string prefix (r not handled)
880 ([uUbB]? # string prefix (r not handled)
880 (?: # unclosed string
881 (?: # unclosed string
881 '(?:[^']|(?<!\\)\\')*
882 '(?:[^']|(?<!\\)\\')*
882 |
883 |
883 "(?:[^"]|(?<!\\)\\")*
884 "(?:[^"]|(?<!\\)\\")*
884 )
885 )
885 )?
886 )?
886 $
887 $
887 '''
888 '''
888 regexps = self.__dict_key_regexps = {
889 regexps = self.__dict_key_regexps = {
889 False: re.compile(dict_key_re_fmt % '''
890 False: re.compile(dict_key_re_fmt % '''
890 # identifiers separated by .
891 # identifiers separated by .
891 (?!\d)\w+
892 (?!\d)\w+
892 (?:\.(?!\d)\w+)*
893 (?:\.(?!\d)\w+)*
893 '''),
894 '''),
894 True: re.compile(dict_key_re_fmt % '''
895 True: re.compile(dict_key_re_fmt % '''
895 .+
896 .+
896 ''')
897 ''')
897 }
898 }
898
899
899 match = regexps[self.greedy].search(self.text_until_cursor)
900 match = regexps[self.greedy].search(self.text_until_cursor)
900 if match is None:
901 if match is None:
901 return []
902 return []
902
903
903 expr, prefix = match.groups()
904 expr, prefix = match.groups()
904 try:
905 try:
905 obj = eval(expr, self.namespace)
906 obj = eval(expr, self.namespace)
906 except Exception:
907 except Exception:
907 try:
908 try:
908 obj = eval(expr, self.global_namespace)
909 obj = eval(expr, self.global_namespace)
909 except Exception:
910 except Exception:
910 return []
911 return []
911
912
912 keys = get_keys(obj)
913 keys = get_keys(obj)
913 if not keys:
914 if not keys:
914 return keys
915 return keys
915 closing_quote, token_offset, matches = match_dict_keys(keys, prefix)
916 closing_quote, token_offset, matches = match_dict_keys(keys, prefix)
916 if not matches:
917 if not matches:
917 return matches
918 return matches
918
919
919 # get the cursor position of
920 # get the cursor position of
920 # - the text being completed
921 # - the text being completed
921 # - the start of the key text
922 # - the start of the key text
922 # - the start of the completion
923 # - the start of the completion
923 text_start = len(self.text_until_cursor) - len(text)
924 text_start = len(self.text_until_cursor) - len(text)
924 if prefix:
925 if prefix:
925 key_start = match.start(2)
926 key_start = match.start(2)
926 completion_start = key_start + token_offset
927 completion_start = key_start + token_offset
927 else:
928 else:
928 key_start = completion_start = match.end()
929 key_start = completion_start = match.end()
929
930
930 # grab the leading prefix, to make sure all completions start with `text`
931 # grab the leading prefix, to make sure all completions start with `text`
931 if text_start > key_start:
932 if text_start > key_start:
932 leading = ''
933 leading = ''
933 else:
934 else:
934 leading = text[text_start:completion_start]
935 leading = text[text_start:completion_start]
935
936
936 # the index of the `[` character
937 # the index of the `[` character
937 bracket_idx = match.end(1)
938 bracket_idx = match.end(1)
938
939
939 # append closing quote and bracket as appropriate
940 # append closing quote and bracket as appropriate
940 # this is *not* appropriate if the opening quote or bracket is outside
941 # this is *not* appropriate if the opening quote or bracket is outside
941 # the text given to this method
942 # the text given to this method
942 suf = ''
943 suf = ''
943 continuation = self.line_buffer[len(self.text_until_cursor):]
944 continuation = self.line_buffer[len(self.text_until_cursor):]
944 if key_start > text_start and closing_quote:
945 if key_start > text_start and closing_quote:
945 # quotes were opened inside text, maybe close them
946 # quotes were opened inside text, maybe close them
946 if continuation.startswith(closing_quote):
947 if continuation.startswith(closing_quote):
947 continuation = continuation[len(closing_quote):]
948 continuation = continuation[len(closing_quote):]
948 else:
949 else:
949 suf += closing_quote
950 suf += closing_quote
950 if bracket_idx > text_start:
951 if bracket_idx > text_start:
951 # brackets were opened inside text, maybe close them
952 # brackets were opened inside text, maybe close them
952 if not continuation.startswith(']'):
953 if not continuation.startswith(']'):
953 suf += ']'
954 suf += ']'
954
955
955 return [leading + k + suf for k in matches]
956 return [leading + k + suf for k in matches]
956
957
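The numpy.void branch added to get_keys() above extends key completion from whole structured arrays to a single structured scalar, i.e. one element of such an array. A minimal sketch of the objects involved, assuming numpy is installed; the array name and dtype are invented for illustration:

    import numpy as np

    # A structured array; indexing a single row yields a numpy.void scalar.
    arr = np.zeros(3, dtype=[('name', 'U10'), ('value', 'f8')])
    row = arr[0]

    print(type(row).__name__)   # 'void'
    print(arr.dtype.names)      # ('name', 'value')  (the existing ndarray branch)
    print(row.dtype.names)      # ('name', 'value')  (the new numpy.void branch)

In an IPython session this means row['<TAB> now offers the field names, matching what arr['<TAB> already did.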
957 def latex_matches(self, text):
958 def latex_matches(self, text):
958 u"""Match LaTeX syntax for unicode characters.
959 u"""Match LaTeX syntax for unicode characters.
959
960
960 This does both \\alp -> \\alpha and \\alpha -> α
961 This does both \\alp -> \\alpha and \\alpha -> α
961
962
962 Used on Python 3 only.
963 Used on Python 3 only.
963 """
964 """
964 slashpos = text.rfind('\\')
965 slashpos = text.rfind('\\')
965 if slashpos > -1:
966 if slashpos > -1:
966 s = text[slashpos:]
967 s = text[slashpos:]
967 if s in latex_symbols:
968 if s in latex_symbols:
968 # Try to complete a full latex symbol to unicode
969 # Try to complete a full latex symbol to unicode
969 # \\alpha -> α
970 # \\alpha -> α
970 return s, [latex_symbols[s]]
971 return s, [latex_symbols[s]]
971 else:
972 else:
972 # If a user has partially typed a latex symbol, give them
973 # If a user has partially typed a latex symbol, give them
973 # a full list of options \al -> [\aleph, \alpha]
974 # a full list of options \al -> [\aleph, \alpha]
974 matches = [k for k in latex_symbols if k.startswith(s)]
975 matches = [k for k in latex_symbols if k.startswith(s)]
975 return s, matches
976 return s, matches
976 return u'', []
977 return u'', []
977
978
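The two behaviours described in the docstring can be checked directly on the completer instance. A small usage sketch, runnable only inside an IPython session on Python 3; the exact candidate list depends on the latex_symbols table:

    ip = get_ipython()                               # IPython only
    print(ip.Completer.latex_matches('\\alpha'))     # ('\\alpha', ['α'])
    print(ip.Completer.latex_matches('\\al'))        # ('\\al', ['\\aleph', '\\alpha', ...])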
978 def dispatch_custom_completer(self, text):
979 def dispatch_custom_completer(self, text):
979 #io.rprint("Custom! '%s' %s" % (text, self.custom_completers)) # dbg
980 #io.rprint("Custom! '%s' %s" % (text, self.custom_completers)) # dbg
980 line = self.line_buffer
981 line = self.line_buffer
981 if not line.strip():
982 if not line.strip():
982 return None
983 return None
983
984
984 # Create a little structure to pass all the relevant information about
985 # Create a little structure to pass all the relevant information about
985 # the current completion to any custom completer.
986 # the current completion to any custom completer.
986 event = Bunch()
987 event = Bunch()
987 event.line = line
988 event.line = line
988 event.symbol = text
989 event.symbol = text
989 cmd = line.split(None,1)[0]
990 cmd = line.split(None,1)[0]
990 event.command = cmd
991 event.command = cmd
991 event.text_until_cursor = self.text_until_cursor
992 event.text_until_cursor = self.text_until_cursor
992
993
993 #print "\ncustom:{%s]\n" % event # dbg
994 #print "\ncustom:{%s]\n" % event # dbg
994
995
995 # for foo etc, try also to find completer for %foo
996 # for foo etc, try also to find completer for %foo
996 if not cmd.startswith(self.magic_escape):
997 if not cmd.startswith(self.magic_escape):
997 try_magic = self.custom_completers.s_matches(
998 try_magic = self.custom_completers.s_matches(
998 self.magic_escape + cmd)
999 self.magic_escape + cmd)
999 else:
1000 else:
1000 try_magic = []
1001 try_magic = []
1001
1002
1002 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1003 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1003 try_magic,
1004 try_magic,
1004 self.custom_completers.flat_matches(self.text_until_cursor)):
1005 self.custom_completers.flat_matches(self.text_until_cursor)):
1005 #print "try",c # dbg
1006 #print "try",c # dbg
1006 try:
1007 try:
1007 res = c(event)
1008 res = c(event)
1008 if res:
1009 if res:
1009 # first, try case sensitive match
1010 # first, try case sensitive match
1010 withcase = [r for r in res if r.startswith(text)]
1011 withcase = [r for r in res if r.startswith(text)]
1011 if withcase:
1012 if withcase:
1012 return withcase
1013 return withcase
1013 # if none, then case insensitive ones are ok too
1014 # if none, then case insensitive ones are ok too
1014 text_low = text.lower()
1015 text_low = text.lower()
1015 return [r for r in res if r.lower().startswith(text_low)]
1016 return [r for r in res if r.lower().startswith(text_low)]
1016 except TryNext:
1017 except TryNext:
1017 pass
1018 pass
1018
1019
1019 return None
1020 return None
1020
1021
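The custom completers iterated in dispatch_custom_completer() are registered through IPython's complete_command hook and receive the Bunch event built there (event.line, event.symbol, event.command, event.text_until_cursor). A hedged registration sketch in the style of the IPython docs; the command name and word list are invented:

    # Complete arguments of lines that start with 'apt'.
    def apt_completer(self, event):
        return ['install', 'remove', 'update', 'upgrade']

    ip = get_ipython()                                             # IPython only
    ip.set_hook('complete_command', apt_completer, str_key='apt')

After this, typing 'apt in<TAB>' offers 'install'; a hook that raises TryNext is skipped and the next registered completer is tried, as handled above.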
1021 def complete(self, text=None, line_buffer=None, cursor_pos=None):
1022 def complete(self, text=None, line_buffer=None, cursor_pos=None):
1022 """Find completions for the given text and line context.
1023 """Find completions for the given text and line context.
1023
1024
1024 Note that both the text and the line_buffer are optional, but at least
1025 Note that both the text and the line_buffer are optional, but at least
1025 one of them must be given.
1026 one of them must be given.
1026
1027
1027 Parameters
1028 Parameters
1028 ----------
1029 ----------
1029 text : string, optional
1030 text : string, optional
1030 Text to perform the completion on. If not given, the line buffer
1031 Text to perform the completion on. If not given, the line buffer
1031 is split using the instance's CompletionSplitter object.
1032 is split using the instance's CompletionSplitter object.
1032
1033
1033 line_buffer : string, optional
1034 line_buffer : string, optional
1034 If not given, the completer attempts to obtain the current line
1035 If not given, the completer attempts to obtain the current line
1035 buffer via readline. This keyword allows clients which are
1036 buffer via readline. This keyword allows clients which are
1036 requesting text completions in non-readline contexts to inform
1037 requesting text completions in non-readline contexts to inform
1037 the completer of the entire text.
1038 the completer of the entire text.
1038
1039
1039 cursor_pos : int, optional
1040 cursor_pos : int, optional
1040 Index of the cursor in the full line buffer. Should be provided by
1041 Index of the cursor in the full line buffer. Should be provided by
1041 remote frontends where the kernel has no access to frontend state.
1042 remote frontends where the kernel has no access to frontend state.
1042
1043
1043 Returns
1044 Returns
1044 -------
1045 -------
1045 text : str
1046 text : str
1046 Text that was actually used in the completion.
1047 Text that was actually used in the completion.
1047
1048
1048 matches : list
1049 matches : list
1049 A list of completion matches.
1050 A list of completion matches.
1050 """
1051 """
1051 # io.rprint('\nCOMP1 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1052 # io.rprint('\nCOMP1 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1052
1053
1053 # if the cursor position isn't given, the only sane assumption we can
1054 # if the cursor position isn't given, the only sane assumption we can
1054 # make is that it's at the end of the line (the common case)
1055 # make is that it's at the end of the line (the common case)
1055 if cursor_pos is None:
1056 if cursor_pos is None:
1056 cursor_pos = len(line_buffer) if text is None else len(text)
1057 cursor_pos = len(line_buffer) if text is None else len(text)
1057
1058
1058 if PY3:
1059 if PY3:
1059 latex_text = text if not line_buffer else line_buffer[:cursor_pos]
1060 latex_text = text if not line_buffer else line_buffer[:cursor_pos]
1060 latex_text, latex_matches = self.latex_matches(latex_text)
1061 latex_text, latex_matches = self.latex_matches(latex_text)
1061 if latex_matches:
1062 if latex_matches:
1062 return latex_text, latex_matches
1063 return latex_text, latex_matches
1063
1064
1064 # if text is either None or an empty string, rely on the line buffer
1065 # if text is either None or an empty string, rely on the line buffer
1065 if not text:
1066 if not text:
1066 text = self.splitter.split_line(line_buffer, cursor_pos)
1067 text = self.splitter.split_line(line_buffer, cursor_pos)
1067
1068
1068 # If no line buffer is given, assume the input text is all there was
1069 # If no line buffer is given, assume the input text is all there was
1069 if line_buffer is None:
1070 if line_buffer is None:
1070 line_buffer = text
1071 line_buffer = text
1071
1072
1072 self.line_buffer = line_buffer
1073 self.line_buffer = line_buffer
1073 self.text_until_cursor = self.line_buffer[:cursor_pos]
1074 self.text_until_cursor = self.line_buffer[:cursor_pos]
1074 # io.rprint('COMP2 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1075 # io.rprint('COMP2 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1075
1076
1076 # Start with a clean slate of completions
1077 # Start with a clean slate of completions
1077 self.matches[:] = []
1078 self.matches[:] = []
1078 custom_res = self.dispatch_custom_completer(text)
1079 custom_res = self.dispatch_custom_completer(text)
1079 if custom_res is not None:
1080 if custom_res is not None:
1080 # did custom completers produce something?
1081 # did custom completers produce something?
1081 self.matches = custom_res
1082 self.matches = custom_res
1082 else:
1083 else:
1083 # Extend the list of completions with the results of each
1084 # Extend the list of completions with the results of each
1084 # matcher, so we return results to the user from all
1085 # matcher, so we return results to the user from all
1085 # namespaces.
1086 # namespaces.
1086 if self.merge_completions:
1087 if self.merge_completions:
1087 self.matches = []
1088 self.matches = []
1088 for matcher in self.matchers:
1089 for matcher in self.matchers:
1089 try:
1090 try:
1090 self.matches.extend(matcher(text))
1091 self.matches.extend(matcher(text))
1091 except:
1092 except:
1092 # Show the ugly traceback if the matcher causes an
1093 # Show the ugly traceback if the matcher causes an
1093 # exception, but do NOT crash the kernel!
1094 # exception, but do NOT crash the kernel!
1094 sys.excepthook(*sys.exc_info())
1095 sys.excepthook(*sys.exc_info())
1095 else:
1096 else:
1096 for matcher in self.matchers:
1097 for matcher in self.matchers:
1097 self.matches = matcher(text)
1098 self.matches = matcher(text)
1098 if self.matches:
1099 if self.matches:
1099 break
1100 break
1100 # FIXME: we should extend our api to return a dict with completions for
1101 # FIXME: we should extend our api to return a dict with completions for
1101 # different types of objects. The rlcomplete() method could then
1102 # different types of objects. The rlcomplete() method could then
1102 # simply collapse the dict into a list for readline, but we'd have
1103 # simply collapse the dict into a list for readline, but we'd have
1103 # richer completion semantics in other environments.
1104 # richer completion semantics in other environments.
1104
1105
1105 # use penalize_magics_key to put magics after variables with same name
1106 # use penalize_magics_key to put magics after variables with same name
1106 self.matches = sorted(set(self.matches), key=penalize_magics_key)
1107 self.matches = sorted(set(self.matches), key=penalize_magics_key)
1107
1108
1108 #io.rprint('COMP TEXT, MATCHES: %r, %r' % (text, self.matches)) # dbg
1109 #io.rprint('COMP TEXT, MATCHES: %r, %r' % (text, self.matches)) # dbg
1109 return text, self.matches
1110 return text, self.matches
1110
1111
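A usage sketch of complete() as the frontends and the tests further below call it, passing the whole line plus a cursor position rather than pre-split text; the exact matches depend on the user namespace:

    ip = get_ipython()                                    # IPython only
    ip.ex('def myfunc(a=1, b=2): return a + b')
    # cursor at position 10, i.e. right after the 'b' in 'myfunc(1,b)'
    text, matches = ip.Completer.complete(None, 'myfunc(1,b)', 10)
    print(text, matches)                                  # 'b' [..., 'b=', ...]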
1111 def rlcomplete(self, text, state):
1112 def rlcomplete(self, text, state):
1112 """Return the state-th possible completion for 'text'.
1113 """Return the state-th possible completion for 'text'.
1113
1114
1114 This is called successively with state == 0, 1, 2, ... until it
1115 This is called successively with state == 0, 1, 2, ... until it
1115 returns None. The completion should begin with 'text'.
1116 returns None. The completion should begin with 'text'.
1116
1117
1117 Parameters
1118 Parameters
1118 ----------
1119 ----------
1119 text : string
1120 text : string
1120 Text to perform the completion on.
1121 Text to perform the completion on.
1121
1122
1122 state : int
1123 state : int
1123 Counter used by readline.
1124 Counter used by readline.
1124 """
1125 """
1125 if state==0:
1126 if state==0:
1126
1127
1127 self.line_buffer = line_buffer = self.readline.get_line_buffer()
1128 self.line_buffer = line_buffer = self.readline.get_line_buffer()
1128 cursor_pos = self.readline.get_endidx()
1129 cursor_pos = self.readline.get_endidx()
1129
1130
1130 #io.rprint("\nRLCOMPLETE: %r %r %r" %
1131 #io.rprint("\nRLCOMPLETE: %r %r %r" %
1131 # (text, line_buffer, cursor_pos) ) # dbg
1132 # (text, line_buffer, cursor_pos) ) # dbg
1132
1133
1133 # if there is only a tab on a line with only whitespace, instead of
1134 # if there is only a tab on a line with only whitespace, instead of
1134 # the mostly useless 'do you want to see all million completions'
1135 # the mostly useless 'do you want to see all million completions'
1135 # message, just do the right thing and give the user his tab!
1136 # message, just do the right thing and give the user his tab!
1136 # Incidentally, this enables pasting of tabbed text from an editor
1137 # Incidentally, this enables pasting of tabbed text from an editor
1137 # (as long as autoindent is off).
1138 # (as long as autoindent is off).
1138
1139
1139 # It should be noted that at least pyreadline still shows file
1140 # It should be noted that at least pyreadline still shows file
1140 # completions - is there a way around it?
1141 # completions - is there a way around it?
1141
1142
1142 # don't apply this on 'dumb' terminals, such as emacs buffers, so
1143 # don't apply this on 'dumb' terminals, such as emacs buffers, so
1143 # we don't interfere with their own tab-completion mechanism.
1144 # we don't interfere with their own tab-completion mechanism.
1144 if not (self.dumb_terminal or line_buffer.strip()):
1145 if not (self.dumb_terminal or line_buffer.strip()):
1145 self.readline.insert_text('\t')
1146 self.readline.insert_text('\t')
1146 sys.stdout.flush()
1147 sys.stdout.flush()
1147 return None
1148 return None
1148
1149
1149 # Note: debugging exceptions that may occur in completion is very
1150 # Note: debugging exceptions that may occur in completion is very
1150 # tricky, because readline unconditionally silences them. So if
1151 # tricky, because readline unconditionally silences them. So if
1151 # during development you suspect a bug in the completion code, turn
1152 # during development you suspect a bug in the completion code, turn
1152 # this flag on temporarily by uncommenting the second form (don't
1153 # this flag on temporarily by uncommenting the second form (don't
1153 # flip the value in the first line, as the '# dbg' marker can be
1154 # flip the value in the first line, as the '# dbg' marker can be
1154 # automatically detected and is used elsewhere).
1155 # automatically detected and is used elsewhere).
1155 DEBUG = False
1156 DEBUG = False
1156 #DEBUG = True # dbg
1157 #DEBUG = True # dbg
1157 if DEBUG:
1158 if DEBUG:
1158 try:
1159 try:
1159 self.complete(text, line_buffer, cursor_pos)
1160 self.complete(text, line_buffer, cursor_pos)
1160 except:
1161 except:
1161 import traceback; traceback.print_exc()
1162 import traceback; traceback.print_exc()
1162 else:
1163 else:
1163 # The normal production version is here
1164 # The normal production version is here
1164
1165
1165 # This method computes the self.matches array
1166 # This method computes the self.matches array
1166 self.complete(text, line_buffer, cursor_pos)
1167 self.complete(text, line_buffer, cursor_pos)
1167
1168
1168 try:
1169 try:
1169 return self.matches[state]
1170 return self.matches[state]
1170 except IndexError:
1171 except IndexError:
1171 return None
1172 return None
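rlcomplete() implements the standard readline completer protocol described in its docstring. The stdlib rlcompleter, which this module forked, follows the same protocol and can illustrate the state loop outside IPython; the drain() helper is invented for this sketch:

    import rlcompleter

    def drain(completer, text):
        """Call complete(text, state) for state = 0, 1, 2, ... until it returns None."""
        matches, state = [], 0
        while True:
            m = completer.complete(text, state)
            if m is None:
                return matches
            matches.append(m)
            state += 1

    print(drain(rlcompleter.Completer(), 'im'))   # e.g. ['import '] plus matching globals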
@@ -1,701 +1,715 b''
1 # encoding: utf-8
1 # encoding: utf-8
2 """Tests for the IPython tab-completion machinery."""
2 """Tests for the IPython tab-completion machinery."""
3
3
4 # Copyright (c) IPython Development Team.
4 # Copyright (c) IPython Development Team.
5 # Distributed under the terms of the Modified BSD License.
5 # Distributed under the terms of the Modified BSD License.
6
6
7 import os
7 import os
8 import sys
8 import sys
9 import unittest
9 import unittest
10
10
11 from contextlib import contextmanager
11 from contextlib import contextmanager
12
12
13 import nose.tools as nt
13 import nose.tools as nt
14
14
15 from IPython.config.loader import Config
15 from IPython.config.loader import Config
16 from IPython.core import completer
16 from IPython.core import completer
17 from IPython.external.decorators import knownfailureif
17 from IPython.external.decorators import knownfailureif
18 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
18 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
19 from IPython.utils.generics import complete_object
19 from IPython.utils.generics import complete_object
20 from IPython.utils import py3compat
20 from IPython.utils import py3compat
21 from IPython.utils.py3compat import string_types, unicode_type
21 from IPython.utils.py3compat import string_types, unicode_type
22 from IPython.testing import decorators as dec
22 from IPython.testing import decorators as dec
23
23
24 #-----------------------------------------------------------------------------
24 #-----------------------------------------------------------------------------
25 # Test functions
25 # Test functions
26 #-----------------------------------------------------------------------------
26 #-----------------------------------------------------------------------------
27
27
28 @contextmanager
28 @contextmanager
29 def greedy_completion():
29 def greedy_completion():
30 ip = get_ipython()
30 ip = get_ipython()
31 greedy_original = ip.Completer.greedy
31 greedy_original = ip.Completer.greedy
32 try:
32 try:
33 ip.Completer.greedy = True
33 ip.Completer.greedy = True
34 yield
34 yield
35 finally:
35 finally:
36 ip.Completer.greedy = greedy_original
36 ip.Completer.greedy = greedy_original
37
37
38 def test_protect_filename():
38 def test_protect_filename():
39 pairs = [ ('abc','abc'),
39 pairs = [ ('abc','abc'),
40 (' abc',r'\ abc'),
40 (' abc',r'\ abc'),
41 ('a bc',r'a\ bc'),
41 ('a bc',r'a\ bc'),
42 ('a bc',r'a\ \ bc'),
42 ('a bc',r'a\ \ bc'),
43 (' bc',r'\ \ bc'),
43 (' bc',r'\ \ bc'),
44 ]
44 ]
45 # On posix, we also protect parens and other special characters
45 # On posix, we also protect parens and other special characters
46 if sys.platform != 'win32':
46 if sys.platform != 'win32':
47 pairs.extend( [('a(bc',r'a\(bc'),
47 pairs.extend( [('a(bc',r'a\(bc'),
48 ('a)bc',r'a\)bc'),
48 ('a)bc',r'a\)bc'),
49 ('a( )bc',r'a\(\ \)bc'),
49 ('a( )bc',r'a\(\ \)bc'),
50 ('a[1]bc', r'a\[1\]bc'),
50 ('a[1]bc', r'a\[1\]bc'),
51 ('a{1}bc', r'a\{1\}bc'),
51 ('a{1}bc', r'a\{1\}bc'),
52 ('a#bc', r'a\#bc'),
52 ('a#bc', r'a\#bc'),
53 ('a?bc', r'a\?bc'),
53 ('a?bc', r'a\?bc'),
54 ('a=bc', r'a\=bc'),
54 ('a=bc', r'a\=bc'),
55 ('a\\bc', r'a\\bc'),
55 ('a\\bc', r'a\\bc'),
56 ('a|bc', r'a\|bc'),
56 ('a|bc', r'a\|bc'),
57 ('a;bc', r'a\;bc'),
57 ('a;bc', r'a\;bc'),
58 ('a:bc', r'a\:bc'),
58 ('a:bc', r'a\:bc'),
59 ("a'bc", r"a\'bc"),
59 ("a'bc", r"a\'bc"),
60 ('a*bc', r'a\*bc'),
60 ('a*bc', r'a\*bc'),
61 ('a"bc', r'a\"bc'),
61 ('a"bc', r'a\"bc'),
62 ('a^bc', r'a\^bc'),
62 ('a^bc', r'a\^bc'),
63 ('a&bc', r'a\&bc'),
63 ('a&bc', r'a\&bc'),
64 ] )
64 ] )
65 # run the actual tests
65 # run the actual tests
66 for s1, s2 in pairs:
66 for s1, s2 in pairs:
67 s1p = completer.protect_filename(s1)
67 s1p = completer.protect_filename(s1)
68 nt.assert_equal(s1p, s2)
68 nt.assert_equal(s1p, s2)
69
69
70
70
71 def check_line_split(splitter, test_specs):
71 def check_line_split(splitter, test_specs):
72 for part1, part2, split in test_specs:
72 for part1, part2, split in test_specs:
73 cursor_pos = len(part1)
73 cursor_pos = len(part1)
74 line = part1+part2
74 line = part1+part2
75 out = splitter.split_line(line, cursor_pos)
75 out = splitter.split_line(line, cursor_pos)
76 nt.assert_equal(out, split)
76 nt.assert_equal(out, split)
77
77
78
78
79 def test_line_split():
79 def test_line_split():
80 """Basic line splitter test with default specs."""
80 """Basic line splitter test with default specs."""
81 sp = completer.CompletionSplitter()
81 sp = completer.CompletionSplitter()
82 # The format of the test specs is: part1, part2, expected answer. Parts 1
82 # The format of the test specs is: part1, part2, expected answer. Parts 1
83 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
83 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
84 # was at the end of part1. So an empty part2 represents someone hitting
84 # was at the end of part1. So an empty part2 represents someone hitting
85 # tab at the end of the line, the most common case.
85 # tab at the end of the line, the most common case.
86 t = [('run some/scrip', '', 'some/scrip'),
86 t = [('run some/scrip', '', 'some/scrip'),
87 ('run scripts/er', 'ror.py foo', 'scripts/er'),
87 ('run scripts/er', 'ror.py foo', 'scripts/er'),
88 ('echo $HOM', '', 'HOM'),
88 ('echo $HOM', '', 'HOM'),
89 ('print sys.pa', '', 'sys.pa'),
89 ('print sys.pa', '', 'sys.pa'),
90 ('print(sys.pa', '', 'sys.pa'),
90 ('print(sys.pa', '', 'sys.pa'),
91 ("execfile('scripts/er", '', 'scripts/er'),
91 ("execfile('scripts/er", '', 'scripts/er'),
92 ('a[x.', '', 'x.'),
92 ('a[x.', '', 'x.'),
93 ('a[x.', 'y', 'x.'),
93 ('a[x.', 'y', 'x.'),
94 ('cd "some_file/', '', 'some_file/'),
94 ('cd "some_file/', '', 'some_file/'),
95 ]
95 ]
96 check_line_split(sp, t)
96 check_line_split(sp, t)
97 # Ensure splitting works OK with unicode by re-running the tests with
97 # Ensure splitting works OK with unicode by re-running the tests with
98 # all inputs turned into unicode
98 # all inputs turned into unicode
99 check_line_split(sp, [ map(unicode_type, p) for p in t] )
99 check_line_split(sp, [ map(unicode_type, p) for p in t] )
100
100
101
101
102 def test_custom_completion_error():
102 def test_custom_completion_error():
103 """Test that errors from custom attribute completers are silenced."""
103 """Test that errors from custom attribute completers are silenced."""
104 ip = get_ipython()
104 ip = get_ipython()
105 class A(object): pass
105 class A(object): pass
106 ip.user_ns['a'] = A()
106 ip.user_ns['a'] = A()
107
107
108 @complete_object.when_type(A)
108 @complete_object.when_type(A)
109 def complete_A(a, existing_completions):
109 def complete_A(a, existing_completions):
110 raise TypeError("this should be silenced")
110 raise TypeError("this should be silenced")
111
111
112 ip.complete("a.")
112 ip.complete("a.")
113
113
114
114
115 def test_unicode_completions():
115 def test_unicode_completions():
116 ip = get_ipython()
116 ip = get_ipython()
117 # Some strings that trigger different types of completion. Check them both
117 # Some strings that trigger different types of completion. Check them both
118 # in str and unicode forms
118 # in str and unicode forms
119 s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
119 s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
120 for t in s + list(map(unicode_type, s)):
120 for t in s + list(map(unicode_type, s)):
121 # We don't need to check exact completion values (they may change
121 # We don't need to check exact completion values (they may change
122 # depending on the state of the namespace), but at least no exceptions
122 # depending on the state of the namespace), but at least no exceptions
123 # should be thrown and the return value should be a pair of text, list
123 # should be thrown and the return value should be a pair of text, list
124 # values.
124 # values.
125 text, matches = ip.complete(t)
125 text, matches = ip.complete(t)
126 nt.assert_true(isinstance(text, string_types))
126 nt.assert_true(isinstance(text, string_types))
127 nt.assert_true(isinstance(matches, list))
127 nt.assert_true(isinstance(matches, list))
128
128
129 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
129 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
130 def test_latex_completions():
130 def test_latex_completions():
131 from IPython.core.latex_symbols import latex_symbols
131 from IPython.core.latex_symbols import latex_symbols
132 import random
132 import random
133 ip = get_ipython()
133 ip = get_ipython()
134 # Test some random unicode symbols
134 # Test some random unicode symbols
135 keys = random.sample(latex_symbols.keys(), 10)
135 keys = random.sample(latex_symbols.keys(), 10)
136 for k in keys:
136 for k in keys:
137 text, matches = ip.complete(k)
137 text, matches = ip.complete(k)
138 nt.assert_equal(len(matches),1)
138 nt.assert_equal(len(matches),1)
139 nt.assert_equal(text, k)
139 nt.assert_equal(text, k)
140 nt.assert_equal(matches[0], latex_symbols[k])
140 nt.assert_equal(matches[0], latex_symbols[k])
141 # Test a more complex line
141 # Test a more complex line
142 text, matches = ip.complete(u'print(\\alpha')
142 text, matches = ip.complete(u'print(\\alpha')
143 nt.assert_equals(text, u'\\alpha')
143 nt.assert_equals(text, u'\\alpha')
144 nt.assert_equals(matches[0], latex_symbols['\\alpha'])
144 nt.assert_equals(matches[0], latex_symbols['\\alpha'])
145 # Test multiple matching latex symbols
145 # Test multiple matching latex symbols
146 text, matches = ip.complete(u'\\al')
146 text, matches = ip.complete(u'\\al')
147 nt.assert_in('\\alpha', matches)
147 nt.assert_in('\\alpha', matches)
148 nt.assert_in('\\aleph', matches)
148 nt.assert_in('\\aleph', matches)
149
149
150
150
151 class CompletionSplitterTestCase(unittest.TestCase):
151 class CompletionSplitterTestCase(unittest.TestCase):
152 def setUp(self):
152 def setUp(self):
153 self.sp = completer.CompletionSplitter()
153 self.sp = completer.CompletionSplitter()
154
154
155 def test_delim_setting(self):
155 def test_delim_setting(self):
156 self.sp.delims = ' '
156 self.sp.delims = ' '
157 nt.assert_equal(self.sp.delims, ' ')
157 nt.assert_equal(self.sp.delims, ' ')
158 nt.assert_equal(self.sp._delim_expr, '[\ ]')
158 nt.assert_equal(self.sp._delim_expr, '[\ ]')
159
159
160 def test_spaces(self):
160 def test_spaces(self):
161 """Test with only spaces as split chars."""
161 """Test with only spaces as split chars."""
162 self.sp.delims = ' '
162 self.sp.delims = ' '
163 t = [('foo', '', 'foo'),
163 t = [('foo', '', 'foo'),
164 ('run foo', '', 'foo'),
164 ('run foo', '', 'foo'),
165 ('run foo', 'bar', 'foo'),
165 ('run foo', 'bar', 'foo'),
166 ]
166 ]
167 check_line_split(self.sp, t)
167 check_line_split(self.sp, t)
168
168
169
169
170 def test_has_open_quotes1():
170 def test_has_open_quotes1():
171 for s in ["'", "'''", "'hi' '"]:
171 for s in ["'", "'''", "'hi' '"]:
172 nt.assert_equal(completer.has_open_quotes(s), "'")
172 nt.assert_equal(completer.has_open_quotes(s), "'")
173
173
174
174
175 def test_has_open_quotes2():
175 def test_has_open_quotes2():
176 for s in ['"', '"""', '"hi" "']:
176 for s in ['"', '"""', '"hi" "']:
177 nt.assert_equal(completer.has_open_quotes(s), '"')
177 nt.assert_equal(completer.has_open_quotes(s), '"')
178
178
179
179
180 def test_has_open_quotes3():
180 def test_has_open_quotes3():
181 for s in ["''", "''' '''", "'hi' 'ipython'"]:
181 for s in ["''", "''' '''", "'hi' 'ipython'"]:
182 nt.assert_false(completer.has_open_quotes(s))
182 nt.assert_false(completer.has_open_quotes(s))
183
183
184
184
185 def test_has_open_quotes4():
185 def test_has_open_quotes4():
186 for s in ['""', '""" """', '"hi" "ipython"']:
186 for s in ['""', '""" """', '"hi" "ipython"']:
187 nt.assert_false(completer.has_open_quotes(s))
187 nt.assert_false(completer.has_open_quotes(s))
188
188
189
189
190 @knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
190 @knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
191 def test_abspath_file_completions():
191 def test_abspath_file_completions():
192 ip = get_ipython()
192 ip = get_ipython()
193 with TemporaryDirectory() as tmpdir:
193 with TemporaryDirectory() as tmpdir:
194 prefix = os.path.join(tmpdir, 'foo')
194 prefix = os.path.join(tmpdir, 'foo')
195 suffixes = ['1', '2']
195 suffixes = ['1', '2']
196 names = [prefix+s for s in suffixes]
196 names = [prefix+s for s in suffixes]
197 for n in names:
197 for n in names:
198 open(n, 'w').close()
198 open(n, 'w').close()
199
199
200 # Check simple completion
200 # Check simple completion
201 c = ip.complete(prefix)[1]
201 c = ip.complete(prefix)[1]
202 nt.assert_equal(c, names)
202 nt.assert_equal(c, names)
203
203
204 # Now check with a function call
204 # Now check with a function call
205 cmd = 'a = f("%s' % prefix
205 cmd = 'a = f("%s' % prefix
206 c = ip.complete(prefix, cmd)[1]
206 c = ip.complete(prefix, cmd)[1]
207 comp = [prefix+s for s in suffixes]
207 comp = [prefix+s for s in suffixes]
208 nt.assert_equal(c, comp)
208 nt.assert_equal(c, comp)
209
209
210
210
211 def test_local_file_completions():
211 def test_local_file_completions():
212 ip = get_ipython()
212 ip = get_ipython()
213 with TemporaryWorkingDirectory():
213 with TemporaryWorkingDirectory():
214 prefix = './foo'
214 prefix = './foo'
215 suffixes = ['1', '2']
215 suffixes = ['1', '2']
216 names = [prefix+s for s in suffixes]
216 names = [prefix+s for s in suffixes]
217 for n in names:
217 for n in names:
218 open(n, 'w').close()
218 open(n, 'w').close()
219
219
220 # Check simple completion
220 # Check simple completion
221 c = ip.complete(prefix)[1]
221 c = ip.complete(prefix)[1]
222 nt.assert_equal(c, names)
222 nt.assert_equal(c, names)
223
223
224 # Now check with a function call
224 # Now check with a function call
225 cmd = 'a = f("%s' % prefix
225 cmd = 'a = f("%s' % prefix
226 c = ip.complete(prefix, cmd)[1]
226 c = ip.complete(prefix, cmd)[1]
227 comp = [prefix+s for s in suffixes]
227 comp = [prefix+s for s in suffixes]
228 nt.assert_equal(c, comp)
228 nt.assert_equal(c, comp)
229
229
230
230
231 def test_greedy_completions():
231 def test_greedy_completions():
232 ip = get_ipython()
232 ip = get_ipython()
233 ip.ex('a=list(range(5))')
233 ip.ex('a=list(range(5))')
234 _,c = ip.complete('.',line='a[0].')
234 _,c = ip.complete('.',line='a[0].')
235 nt.assert_false('a[0].real' in c,
235 nt.assert_false('a[0].real' in c,
236 "Shouldn't have completed on a[0]: %s"%c)
236 "Shouldn't have completed on a[0]: %s"%c)
237 with greedy_completion():
237 with greedy_completion():
238 _,c = ip.complete('.',line='a[0].')
238 _,c = ip.complete('.',line='a[0].')
239 nt.assert_true('a[0].real' in c, "Should have completed on a[0]: %s"%c)
239 nt.assert_true('a[0].real' in c, "Should have completed on a[0]: %s"%c)
240
240
241
241
242 def test_omit__names():
242 def test_omit__names():
243 # also happens to test IPCompleter as a configurable
243 # also happens to test IPCompleter as a configurable
244 ip = get_ipython()
244 ip = get_ipython()
245 ip._hidden_attr = 1
245 ip._hidden_attr = 1
246 ip._x = {}
246 ip._x = {}
247 c = ip.Completer
247 c = ip.Completer
248 ip.ex('ip=get_ipython()')
248 ip.ex('ip=get_ipython()')
249 cfg = Config()
249 cfg = Config()
250 cfg.IPCompleter.omit__names = 0
250 cfg.IPCompleter.omit__names = 0
251 c.update_config(cfg)
251 c.update_config(cfg)
252 s,matches = c.complete('ip.')
252 s,matches = c.complete('ip.')
253 nt.assert_in('ip.__str__', matches)
253 nt.assert_in('ip.__str__', matches)
254 nt.assert_in('ip._hidden_attr', matches)
254 nt.assert_in('ip._hidden_attr', matches)
255 cfg.IPCompleter.omit__names = 1
255 cfg.IPCompleter.omit__names = 1
256 c.update_config(cfg)
256 c.update_config(cfg)
257 s,matches = c.complete('ip.')
257 s,matches = c.complete('ip.')
258 nt.assert_not_in('ip.__str__', matches)
258 nt.assert_not_in('ip.__str__', matches)
259 nt.assert_in('ip._hidden_attr', matches)
259 nt.assert_in('ip._hidden_attr', matches)
260 cfg.IPCompleter.omit__names = 2
260 cfg.IPCompleter.omit__names = 2
261 c.update_config(cfg)
261 c.update_config(cfg)
262 s,matches = c.complete('ip.')
262 s,matches = c.complete('ip.')
263 nt.assert_not_in('ip.__str__', matches)
263 nt.assert_not_in('ip.__str__', matches)
264 nt.assert_not_in('ip._hidden_attr', matches)
264 nt.assert_not_in('ip._hidden_attr', matches)
265 s,matches = c.complete('ip._x.')
265 s,matches = c.complete('ip._x.')
266 nt.assert_in('ip._x.keys', matches)
266 nt.assert_in('ip._x.keys', matches)
267 del ip._hidden_attr
267 del ip._hidden_attr
268
268
269
269
270 def test_limit_to__all__False_ok():
270 def test_limit_to__all__False_ok():
271 ip = get_ipython()
271 ip = get_ipython()
272 c = ip.Completer
272 c = ip.Completer
273 ip.ex('class D: x=24')
273 ip.ex('class D: x=24')
274 ip.ex('d=D()')
274 ip.ex('d=D()')
275 cfg = Config()
275 cfg = Config()
276 cfg.IPCompleter.limit_to__all__ = False
276 cfg.IPCompleter.limit_to__all__ = False
277 c.update_config(cfg)
277 c.update_config(cfg)
278 s, matches = c.complete('d.')
278 s, matches = c.complete('d.')
279 nt.assert_in('d.x', matches)
279 nt.assert_in('d.x', matches)
280
280
281
281
282 def test_limit_to__all__True_ok():
282 def test_limit_to__all__True_ok():
283 ip = get_ipython()
283 ip = get_ipython()
284 c = ip.Completer
284 c = ip.Completer
285 ip.ex('class D: x=24')
285 ip.ex('class D: x=24')
286 ip.ex('d=D()')
286 ip.ex('d=D()')
287 ip.ex("d.__all__=['z']")
287 ip.ex("d.__all__=['z']")
288 cfg = Config()
288 cfg = Config()
289 cfg.IPCompleter.limit_to__all__ = True
289 cfg.IPCompleter.limit_to__all__ = True
290 c.update_config(cfg)
290 c.update_config(cfg)
291 s, matches = c.complete('d.')
291 s, matches = c.complete('d.')
292 nt.assert_in('d.z', matches)
292 nt.assert_in('d.z', matches)
293 nt.assert_not_in('d.x', matches)
293 nt.assert_not_in('d.x', matches)
294
294
295
295
296 def test_get__all__entries_ok():
296 def test_get__all__entries_ok():
297 class A(object):
297 class A(object):
298 __all__ = ['x', 1]
298 __all__ = ['x', 1]
299 words = completer.get__all__entries(A())
299 words = completer.get__all__entries(A())
300 nt.assert_equal(words, ['x'])
300 nt.assert_equal(words, ['x'])
301
301
302
302
303 def test_get__all__entries_no__all__ok():
303 def test_get__all__entries_no__all__ok():
304 class A(object):
304 class A(object):
305 pass
305 pass
306 words = completer.get__all__entries(A())
306 words = completer.get__all__entries(A())
307 nt.assert_equal(words, [])
307 nt.assert_equal(words, [])
308
308
309
309
310 def test_func_kw_completions():
310 def test_func_kw_completions():
311 ip = get_ipython()
311 ip = get_ipython()
312 c = ip.Completer
312 c = ip.Completer
313 ip.ex('def myfunc(a=1,b=2): return a+b')
313 ip.ex('def myfunc(a=1,b=2): return a+b')
314 s, matches = c.complete(None, 'myfunc(1,b')
314 s, matches = c.complete(None, 'myfunc(1,b')
315 nt.assert_in('b=', matches)
315 nt.assert_in('b=', matches)
316 # Simulate completing with cursor right after b (pos==10):
316 # Simulate completing with cursor right after b (pos==10):
317 s, matches = c.complete(None, 'myfunc(1,b)', 10)
317 s, matches = c.complete(None, 'myfunc(1,b)', 10)
318 nt.assert_in('b=', matches)
318 nt.assert_in('b=', matches)
319 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
319 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
320 nt.assert_in('b=', matches)
320 nt.assert_in('b=', matches)
321 #builtin function
321 #builtin function
322 s, matches = c.complete(None, 'min(k, k')
322 s, matches = c.complete(None, 'min(k, k')
323 nt.assert_in('key=', matches)
323 nt.assert_in('key=', matches)
324
324
325
325
326 def test_default_arguments_from_docstring():
326 def test_default_arguments_from_docstring():
327 doc = min.__doc__
327 doc = min.__doc__
328 ip = get_ipython()
328 ip = get_ipython()
329 c = ip.Completer
329 c = ip.Completer
330 kwd = c._default_arguments_from_docstring(
330 kwd = c._default_arguments_from_docstring(
331 'min(iterable[, key=func]) -> value')
331 'min(iterable[, key=func]) -> value')
332 nt.assert_equal(kwd, ['key'])
332 nt.assert_equal(kwd, ['key'])
333 #with cython type etc
333 #with cython type etc
334 kwd = c._default_arguments_from_docstring(
334 kwd = c._default_arguments_from_docstring(
335 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
335 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
336 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
336 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
337 #white spaces
337 #white spaces
338 kwd = c._default_arguments_from_docstring(
338 kwd = c._default_arguments_from_docstring(
339 '\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
339 '\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
340 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
340 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
341
341
342 def test_line_magics():
342 def test_line_magics():
343 ip = get_ipython()
343 ip = get_ipython()
344 c = ip.Completer
344 c = ip.Completer
345 s, matches = c.complete(None, 'lsmag')
345 s, matches = c.complete(None, 'lsmag')
346 nt.assert_in('%lsmagic', matches)
346 nt.assert_in('%lsmagic', matches)
347 s, matches = c.complete(None, '%lsmag')
347 s, matches = c.complete(None, '%lsmag')
348 nt.assert_in('%lsmagic', matches)
348 nt.assert_in('%lsmagic', matches)
349
349
350
350
351 def test_cell_magics():
351 def test_cell_magics():
352 from IPython.core.magic import register_cell_magic
352 from IPython.core.magic import register_cell_magic
353
353
354 @register_cell_magic
354 @register_cell_magic
355 def _foo_cellm(line, cell):
355 def _foo_cellm(line, cell):
356 pass
356 pass
357
357
358 ip = get_ipython()
358 ip = get_ipython()
359 c = ip.Completer
359 c = ip.Completer
360
360
361 s, matches = c.complete(None, '_foo_ce')
361 s, matches = c.complete(None, '_foo_ce')
362 nt.assert_in('%%_foo_cellm', matches)
362 nt.assert_in('%%_foo_cellm', matches)
363 s, matches = c.complete(None, '%%_foo_ce')
363 s, matches = c.complete(None, '%%_foo_ce')
364 nt.assert_in('%%_foo_cellm', matches)
364 nt.assert_in('%%_foo_cellm', matches)
365
365
366
366
367 def test_line_cell_magics():
367 def test_line_cell_magics():
368 from IPython.core.magic import register_line_cell_magic
368 from IPython.core.magic import register_line_cell_magic
369
369
370 @register_line_cell_magic
370 @register_line_cell_magic
371 def _bar_cellm(line, cell):
371 def _bar_cellm(line, cell):
372 pass
372 pass
373
373
374 ip = get_ipython()
374 ip = get_ipython()
375 c = ip.Completer
375 c = ip.Completer
376
376
377 # The policy here is trickier, see comments in completion code. The
377 # The policy here is trickier, see comments in completion code. The
378 # returned values depend on whether the user passes %% or not explicitly,
378 # returned values depend on whether the user passes %% or not explicitly,
379 # and this will show a difference if the same name is both a line and cell
379 # and this will show a difference if the same name is both a line and cell
380 # magic.
380 # magic.
381 s, matches = c.complete(None, '_bar_ce')
381 s, matches = c.complete(None, '_bar_ce')
382 nt.assert_in('%_bar_cellm', matches)
382 nt.assert_in('%_bar_cellm', matches)
383 nt.assert_in('%%_bar_cellm', matches)
383 nt.assert_in('%%_bar_cellm', matches)
384 s, matches = c.complete(None, '%_bar_ce')
384 s, matches = c.complete(None, '%_bar_ce')
385 nt.assert_in('%_bar_cellm', matches)
385 nt.assert_in('%_bar_cellm', matches)
386 nt.assert_in('%%_bar_cellm', matches)
386 nt.assert_in('%%_bar_cellm', matches)
387 s, matches = c.complete(None, '%%_bar_ce')
387 s, matches = c.complete(None, '%%_bar_ce')
388 nt.assert_not_in('%_bar_cellm', matches)
388 nt.assert_not_in('%_bar_cellm', matches)
389 nt.assert_in('%%_bar_cellm', matches)
389 nt.assert_in('%%_bar_cellm', matches)
390
390
391
391
392 def test_magic_completion_order():
392 def test_magic_completion_order():
393
393
394 ip = get_ipython()
394 ip = get_ipython()
395 c = ip.Completer
395 c = ip.Completer
396
396
397 # Test ordering of magics and non-magics with the same name
397 # Test ordering of magics and non-magics with the same name
398 # We want the non-magic first
398 # We want the non-magic first
399
399
400 # Before importing matplotlib, there should only be one option:
400 # Before importing matplotlib, there should only be one option:
401
401
402 text, matches = c.complete('mat')
402 text, matches = c.complete('mat')
403 nt.assert_equal(matches, ["%matplotlib"])
403 nt.assert_equal(matches, ["%matplotlib"])
404
404
405
405
406 ip.run_cell("matplotlib = 1") # introduce name into namespace
406 ip.run_cell("matplotlib = 1") # introduce name into namespace
407
407
408 # After the import, there should be two options, ordered like this:
408 # After the import, there should be two options, ordered like this:
409 text, matches = c.complete('mat')
409 text, matches = c.complete('mat')
410 nt.assert_equal(matches, ["matplotlib", "%matplotlib"])
410 nt.assert_equal(matches, ["matplotlib", "%matplotlib"])
411
411
412
412
413 ip.run_cell("timeit = 1") # define a user variable called 'timeit'
413 ip.run_cell("timeit = 1") # define a user variable called 'timeit'
414
414
415 # Order of user variable and line and cell magics with same name:
415 # Order of user variable and line and cell magics with same name:
416 text, matches = c.complete('timeit')
416 text, matches = c.complete('timeit')
417 nt.assert_equal(matches, ["timeit", "%timeit","%%timeit"])
417 nt.assert_equal(matches, ["timeit", "%timeit","%%timeit"])
418
418
419
419
420 def test_dict_key_completion_string():
420 def test_dict_key_completion_string():
421 """Test dictionary key completion for string keys"""
421 """Test dictionary key completion for string keys"""
422 ip = get_ipython()
422 ip = get_ipython()
423 complete = ip.Completer.complete
423 complete = ip.Completer.complete
424
424
425 ip.user_ns['d'] = {'abc': None}
425 ip.user_ns['d'] = {'abc': None}
426
426
427 # check completion at different stages
427 # check completion at different stages
428 _, matches = complete(line_buffer="d[")
428 _, matches = complete(line_buffer="d[")
429 nt.assert_in("'abc'", matches)
429 nt.assert_in("'abc'", matches)
430 nt.assert_not_in("'abc']", matches)
430 nt.assert_not_in("'abc']", matches)
431
431
432 _, matches = complete(line_buffer="d['")
432 _, matches = complete(line_buffer="d['")
433 nt.assert_in("abc", matches)
433 nt.assert_in("abc", matches)
434 nt.assert_not_in("abc']", matches)
434 nt.assert_not_in("abc']", matches)
435
435
436 _, matches = complete(line_buffer="d['a")
436 _, matches = complete(line_buffer="d['a")
437 nt.assert_in("abc", matches)
437 nt.assert_in("abc", matches)
438 nt.assert_not_in("abc']", matches)
438 nt.assert_not_in("abc']", matches)
439
439
440 # check use of different quoting
440 # check use of different quoting
441 _, matches = complete(line_buffer="d[\"")
441 _, matches = complete(line_buffer="d[\"")
442 nt.assert_in("abc", matches)
442 nt.assert_in("abc", matches)
443 nt.assert_not_in('abc\"]', matches)
443 nt.assert_not_in('abc\"]', matches)
444
444
445 _, matches = complete(line_buffer="d[\"a")
445 _, matches = complete(line_buffer="d[\"a")
446 nt.assert_in("abc", matches)
446 nt.assert_in("abc", matches)
447 nt.assert_not_in('abc\"]', matches)
447 nt.assert_not_in('abc\"]', matches)
448
448
449 # check sensitivity to following context
449 # check sensitivity to following context
450 _, matches = complete(line_buffer="d[]", cursor_pos=2)
450 _, matches = complete(line_buffer="d[]", cursor_pos=2)
451 nt.assert_in("'abc'", matches)
451 nt.assert_in("'abc'", matches)
452
452
453 _, matches = complete(line_buffer="d['']", cursor_pos=3)
453 _, matches = complete(line_buffer="d['']", cursor_pos=3)
454 nt.assert_in("abc", matches)
454 nt.assert_in("abc", matches)
455 nt.assert_not_in("abc'", matches)
455 nt.assert_not_in("abc'", matches)
456 nt.assert_not_in("abc']", matches)
456 nt.assert_not_in("abc']", matches)
457
457
458 # check multiple solutions are correctly returned and that noise is not
458 # check multiple solutions are correctly returned and that noise is not
459 ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,
459 ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,
460 5: None}
460 5: None}
461
461
462 _, matches = complete(line_buffer="d['a")
462 _, matches = complete(line_buffer="d['a")
463 nt.assert_in("abc", matches)
463 nt.assert_in("abc", matches)
464 nt.assert_in("abd", matches)
464 nt.assert_in("abd", matches)
465 nt.assert_not_in("bad", matches)
465 nt.assert_not_in("bad", matches)
466 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
466 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
467
467
468 # check escaping and whitespace
468 # check escaping and whitespace
469 ip.user_ns['d'] = {'a\nb': None, 'a\'b': None, 'a"b': None, 'a word': None}
469 ip.user_ns['d'] = {'a\nb': None, 'a\'b': None, 'a"b': None, 'a word': None}
470 _, matches = complete(line_buffer="d['a")
470 _, matches = complete(line_buffer="d['a")
471 nt.assert_in("a\\nb", matches)
471 nt.assert_in("a\\nb", matches)
472 nt.assert_in("a\\'b", matches)
472 nt.assert_in("a\\'b", matches)
473 nt.assert_in("a\"b", matches)
473 nt.assert_in("a\"b", matches)
474 nt.assert_in("a word", matches)
474 nt.assert_in("a word", matches)
475 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
475 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
476
476
477 # - can complete on non-initial word of the string
477 # - can complete on non-initial word of the string
478 _, matches = complete(line_buffer="d['a w")
478 _, matches = complete(line_buffer="d['a w")
479 nt.assert_in("word", matches)
479 nt.assert_in("word", matches)
480
480
481 # - understands quote escaping
481 # - understands quote escaping
482 _, matches = complete(line_buffer="d['a\\'")
482 _, matches = complete(line_buffer="d['a\\'")
483 nt.assert_in("b", matches)
483 nt.assert_in("b", matches)
484
484
485 # - default quoting should work like repr
485 # - default quoting should work like repr
486 _, matches = complete(line_buffer="d[")
486 _, matches = complete(line_buffer="d[")
487 nt.assert_in("\"a'b\"", matches)
487 nt.assert_in("\"a'b\"", matches)
488
488
489 # - when opening quote with ", possible to match with unescaped apostrophe
489 # - when opening quote with ", possible to match with unescaped apostrophe
490 _, matches = complete(line_buffer="d[\"a'")
490 _, matches = complete(line_buffer="d[\"a'")
491 nt.assert_in("b", matches)
491 nt.assert_in("b", matches)
492
492
493
493
494 def test_dict_key_completion_contexts():
494 def test_dict_key_completion_contexts():
495 """Test expression contexts in which dict key completion occurs"""
495 """Test expression contexts in which dict key completion occurs"""
496 ip = get_ipython()
496 ip = get_ipython()
497 complete = ip.Completer.complete
497 complete = ip.Completer.complete
498 d = {'abc': None}
498 d = {'abc': None}
499 ip.user_ns['d'] = d
499 ip.user_ns['d'] = d
500
500
501 class C:
501 class C:
502 data = d
502 data = d
503 ip.user_ns['C'] = C
503 ip.user_ns['C'] = C
504 ip.user_ns['get'] = lambda: d
504 ip.user_ns['get'] = lambda: d
505
505
506 def assert_no_completion(**kwargs):
506 def assert_no_completion(**kwargs):
507 _, matches = complete(**kwargs)
507 _, matches = complete(**kwargs)
508 nt.assert_not_in('abc', matches)
508 nt.assert_not_in('abc', matches)
509 nt.assert_not_in('abc\'', matches)
509 nt.assert_not_in('abc\'', matches)
510 nt.assert_not_in('abc\']', matches)
510 nt.assert_not_in('abc\']', matches)
511 nt.assert_not_in('\'abc\'', matches)
511 nt.assert_not_in('\'abc\'', matches)
512 nt.assert_not_in('\'abc\']', matches)
512 nt.assert_not_in('\'abc\']', matches)
513
513
514 def assert_completion(**kwargs):
514 def assert_completion(**kwargs):
515 _, matches = complete(**kwargs)
515 _, matches = complete(**kwargs)
516 nt.assert_in("'abc'", matches)
516 nt.assert_in("'abc'", matches)
517 nt.assert_not_in("'abc']", matches)
517 nt.assert_not_in("'abc']", matches)
518
518
519 # no completion after string closed, even if reopened
519 # no completion after string closed, even if reopened
520 assert_no_completion(line_buffer="d['a'")
520 assert_no_completion(line_buffer="d['a'")
521 assert_no_completion(line_buffer="d[\"a\"")
521 assert_no_completion(line_buffer="d[\"a\"")
522 assert_no_completion(line_buffer="d['a' + ")
522 assert_no_completion(line_buffer="d['a' + ")
523 assert_no_completion(line_buffer="d['a' + '")
523 assert_no_completion(line_buffer="d['a' + '")
524
524
525 # completion in non-trivial expressions
525 # completion in non-trivial expressions
526 assert_completion(line_buffer="+ d[")
526 assert_completion(line_buffer="+ d[")
527 assert_completion(line_buffer="(d[")
527 assert_completion(line_buffer="(d[")
528 assert_completion(line_buffer="C.data[")
528 assert_completion(line_buffer="C.data[")
529
529
530 # greedy flag
530 # greedy flag
531 def assert_completion(**kwargs):
531 def assert_completion(**kwargs):
532 _, matches = complete(**kwargs)
532 _, matches = complete(**kwargs)
533 nt.assert_in("get()['abc']", matches)
533 nt.assert_in("get()['abc']", matches)
534
534
535 assert_no_completion(line_buffer="get()[")
535 assert_no_completion(line_buffer="get()[")
536 with greedy_completion():
536 with greedy_completion():
537 assert_completion(line_buffer="get()[")
537 assert_completion(line_buffer="get()[")
538 assert_completion(line_buffer="get()['")
538 assert_completion(line_buffer="get()['")
539 assert_completion(line_buffer="get()['a")
539 assert_completion(line_buffer="get()['a")
540 assert_completion(line_buffer="get()['ab")
540 assert_completion(line_buffer="get()['ab")
541 assert_completion(line_buffer="get()['abc")
541 assert_completion(line_buffer="get()['abc")
542
542
543
543
544
544
545 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
545 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
546 def test_dict_key_completion_bytes():
546 def test_dict_key_completion_bytes():
547 """Test handling of bytes in dict key completion"""
547 """Test handling of bytes in dict key completion"""
548 ip = get_ipython()
548 ip = get_ipython()
549 complete = ip.Completer.complete
549 complete = ip.Completer.complete
550
550
551 ip.user_ns['d'] = {'abc': None, b'abd': None}
551 ip.user_ns['d'] = {'abc': None, b'abd': None}
552
552
553 _, matches = complete(line_buffer="d[")
553 _, matches = complete(line_buffer="d[")
554 nt.assert_in("'abc'", matches)
554 nt.assert_in("'abc'", matches)
555 nt.assert_in("b'abd'", matches)
555 nt.assert_in("b'abd'", matches)
556
556
557 if False: # not currently implemented
557 if False: # not currently implemented
558 _, matches = complete(line_buffer="d[b")
558 _, matches = complete(line_buffer="d[b")
559 nt.assert_in("b'abd'", matches)
559 nt.assert_in("b'abd'", matches)
560 nt.assert_not_in("b'abc'", matches)
560 nt.assert_not_in("b'abc'", matches)
561
561
562 _, matches = complete(line_buffer="d[b'")
562 _, matches = complete(line_buffer="d[b'")
563 nt.assert_in("abd", matches)
563 nt.assert_in("abd", matches)
564 nt.assert_not_in("abc", matches)
564 nt.assert_not_in("abc", matches)
565
565
566 _, matches = complete(line_buffer="d[B'")
566 _, matches = complete(line_buffer="d[B'")
567 nt.assert_in("abd", matches)
567 nt.assert_in("abd", matches)
568 nt.assert_not_in("abc", matches)
568 nt.assert_not_in("abc", matches)
569
569
570 _, matches = complete(line_buffer="d['")
570 _, matches = complete(line_buffer="d['")
571 nt.assert_in("abc", matches)
571 nt.assert_in("abc", matches)
572 nt.assert_not_in("abd", matches)
572 nt.assert_not_in("abd", matches)
573
573
574
574


@dec.onlyif(sys.version_info[0] < 3, 'This test only applies in Py<3')
def test_dict_key_completion_unicode_py2():
    """Test handling of unicode in dict key completion"""
    ip = get_ipython()
    complete = ip.Completer.complete

    ip.user_ns['d'] = {u'abc': None,
                       u'a\u05d0b': None}

    _, matches = complete(line_buffer="d[")
    nt.assert_in("u'abc'", matches)
    nt.assert_in("u'a\\u05d0b'", matches)

    _, matches = complete(line_buffer="d['a")
    nt.assert_in("abc", matches)
    nt.assert_not_in("a\\u05d0b", matches)

    _, matches = complete(line_buffer="d[u'a")
    nt.assert_in("abc", matches)
    nt.assert_in("a\\u05d0b", matches)

    _, matches = complete(line_buffer="d[U'a")
    nt.assert_in("abc", matches)
    nt.assert_in("a\\u05d0b", matches)

    # query using escape
    _, matches = complete(line_buffer=u"d[u'a\\u05d0")
    nt.assert_in("u05d0b", matches)  # tokenized after \\

    # query using character
    _, matches = complete(line_buffer=u"d[u'a\u05d0")
    nt.assert_in(u"a\u05d0b", matches)

    with greedy_completion():
        _, matches = complete(line_buffer="d[")
        nt.assert_in("d[u'abc']", matches)
        nt.assert_in("d[u'a\\u05d0b']", matches)

        _, matches = complete(line_buffer="d['a")
        nt.assert_in("d['abc']", matches)
        nt.assert_not_in("d[u'a\\u05d0b']", matches)

        _, matches = complete(line_buffer="d[u'a")
        nt.assert_in("d[u'abc']", matches)
        nt.assert_in("d[u'a\\u05d0b']", matches)

        _, matches = complete(line_buffer="d[U'a")
        nt.assert_in("d[U'abc']", matches)
        nt.assert_in("d[U'a\\u05d0b']", matches)

        # query using escape
        _, matches = complete(line_buffer=u"d[u'a\\u05d0")
        nt.assert_in("d[u'a\\u05d0b']", matches)  # tokenized after \\

        # query using character
        _, matches = complete(line_buffer=u"d[u'a\u05d0")
        nt.assert_in(u"d[u'a\u05d0b']", matches)


@dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
def test_dict_key_completion_unicode_py3():
    """Test handling of unicode in dict key completion"""
    ip = get_ipython()
    complete = ip.Completer.complete

    ip.user_ns['d'] = {u'a\u05d0': None}

    # query using escape
    _, matches = complete(line_buffer="d['a\\u05d0")
    nt.assert_in("u05d0", matches)  # tokenized after \\

    # query using character
    _, matches = complete(line_buffer="d['a\u05d0")
    nt.assert_in(u"a\u05d0", matches)

    with greedy_completion():
        # query using escape
        _, matches = complete(line_buffer="d['a\\u05d0")
        nt.assert_in("d['a\\u05d0']", matches)  # tokenized after \\

        # query using character
        _, matches = complete(line_buffer="d['a\u05d0")
        nt.assert_in(u"d['a\u05d0']", matches)
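

# Clarifying sketch (not part of the IPython test suite; the helper name is
# made up): the "escape" and "character" queries above differ only in how
# Python parses the source text. u"a\u05d0" is already the two-character
# string containing U+05D0, while u"a\\u05d0" keeps a literal backslash
# followed by 'u05d0', which is presumably why the completer's tokenizer
# splits after the backslash (see the '# tokenized after \\' comments above).
def _escape_vs_character_sketch():
    assert len(u"a\u05d0") == 2    # parsed escape: 'a' plus HEBREW LETTER ALEF
    assert len(u"a\\u05d0") == 7   # literal backslash kept in the string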


@dec.skip_without('numpy')
def test_struct_array_key_completion():
    """Test dict key completion applies to numpy struct arrays"""
    import numpy
    ip = get_ipython()
    complete = ip.Completer.complete
    ip.user_ns['d'] = numpy.array([], dtype=[('hello', 'f'), ('world', 'f')])
    _, matches = complete(line_buffer="d['")
    nt.assert_in("hello", matches)
    nt.assert_in("world", matches)
    # complete on the numpy struct itself
    dt = numpy.dtype([('my_head', [('my_dt', '>u4'), ('my_df', '>u4')]),
                      ('my_data', '>f4', 5)])
    x = numpy.zeros(2, dtype=dt)
    ip.user_ns['d'] = x[1]
    _, matches = complete(line_buffer="d['")
    nt.assert_in("my_head", matches)
    nt.assert_in("my_data", matches)
    # complete on a nested level
    with greedy_completion():
        ip.user_ns['d'] = numpy.zeros(2, dtype=dt)
        _, matches = complete(line_buffer="d[1]['my_head']['")
        nt.assert_true(any(["my_dt" in m for m in matches]))
        nt.assert_true(any(["my_df" in m for m in matches]))
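

# Illustrative sketch (not part of the IPython test suite; the helper name is
# made up): structured arrays, the numpy.void records obtained by indexing
# them, and nested sub-records all expose their field names through
# ``dtype.names``, which is presumably what the dict-key completion exercised
# above relies on. The dtype mirrors the one used in the test.
def _struct_field_names_sketch():
    import numpy
    dt = numpy.dtype([('my_head', [('my_dt', '>u4'), ('my_df', '>u4')]),
                      ('my_data', '>f4', 5)])
    x = numpy.zeros(2, dtype=dt)
    assert x.dtype.names == ('my_head', 'my_data')             # whole array
    assert x[1].dtype.names == ('my_head', 'my_data')          # single record
    assert x[1]['my_head'].dtype.names == ('my_dt', 'my_df')   # nested level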


@dec.skip_without('pandas')
def test_dataframe_key_completion():
    """Test dict key completion applies to pandas DataFrames"""
    import pandas
    ip = get_ipython()
    complete = ip.Completer.complete
    ip.user_ns['d'] = pandas.DataFrame({'hello': [1], 'world': [2]})
    _, matches = complete(line_buffer="d['")
    nt.assert_in("hello", matches)
    nt.assert_in("world", matches)
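

# Illustrative sketch (not part of the IPython test suite; the helper name is
# made up): a DataFrame acts like a mapping from column names to columns --
# ``keys()`` returns the column Index and membership tests check column
# names -- which is presumably what lets the dict-key completer offer
# 'hello' and 'world' above.
def _dataframe_keys_sketch():
    import pandas
    df = pandas.DataFrame({'hello': [1], 'world': [2]})
    assert list(df.keys()) == ['hello', 'world']   # column names, like dict keys
    assert 'hello' in df and 'nope' not in df      # membership checks columns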


def test_dict_key_completion_invalids():
    """Smoke test cases dict key completion can't handle"""
    ip = get_ipython()
    complete = ip.Completer.complete

    ip.user_ns['no_getitem'] = None
    ip.user_ns['no_keys'] = []
    ip.user_ns['cant_call_keys'] = dict
    ip.user_ns['empty'] = {}
    ip.user_ns['d'] = {'abc': 5}

    _, matches = complete(line_buffer="no_getitem['")
    _, matches = complete(line_buffer="no_keys['")
    _, matches = complete(line_buffer="cant_call_keys['")
    _, matches = complete(line_buffer="empty['")
    _, matches = complete(line_buffer="name_error['")
    _, matches = complete(line_buffer="d['\\")  # incomplete escape