Rename method to _ipython_key_completions_
Thomas Kluyver
# encoding: utf-8
"""Word completion for IPython.

This module is a fork of the rlcompleter module in the Python standard
library.  The original enhancements made to rlcompleter have been sent
upstream and were accepted as of Python 2.3, but we need a lot more
functionality specific to IPython, so this module will continue to live as an
IPython-specific utility.

Original rlcompleter documentation:

This requires the latest extension to the readline module.  The completer
completes keywords, built-ins and globals in __main__; when completing
NAME.NAME..., it evaluates (!) the expression up to the last dot and
completes its attributes.

It's very cool to do "import string" type "string.", hit the
completion key (twice), and see the list of names defined by the
string module!

Tip: to use the tab key as the completion key, call

    readline.parse_and_bind("tab: complete")

Notes:

- Exceptions raised by the completer function are *ignored* (and
  generally cause the completion to fail).  This is a feature -- since
  readline sets the tty device in raw (or cbreak) mode, printing a
  traceback wouldn't work well without some complicated hoopla to save,
  reset and restore the tty state.

- The evaluation of the NAME.NAME... form may cause arbitrary
  application defined code to be executed if an object with a
  ``__getattr__`` hook is found.  Since it is the responsibility of the
  application (or the user) to enable this feature, I consider this an
  acceptable risk.  More complicated expressions (e.g. function calls or
  indexing operations) are *not* evaluated.

- GNU readline is also used by the built-in functions input() and
  raw_input(), and thus these also benefit/suffer from the completer
  features.  Clearly an interactive application can benefit by
  specifying its own completer function and using raw_input() for all
  its input.

- When the original stdin is not a tty device, GNU readline is never
  used, and this module (and the readline module) are silently inactive.
"""

# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
#
# Some of this code originated from rlcompleter in the Python standard library
# Copyright (C) 2001 Python Software Foundation, www.python.org

import __main__
import glob
import inspect
import itertools
import keyword
import os
import re
import sys
import unicodedata
import string

from traitlets.config.configurable import Configurable
from IPython.core.error import TryNext
from IPython.core.inputsplitter import ESC_MAGIC
from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
from IPython.utils import generics
from IPython.utils import io
from IPython.utils.decorators import undoc
from IPython.utils.dir2 import dir2, safe_hasattr
from IPython.utils.process import arg_split
from IPython.utils.py3compat import builtin_mod, string_types, PY3
from traitlets import CBool, Enum

#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------

# Public API
__all__ = ['Completer','IPCompleter']

if sys.platform == 'win32':
    PROTECTABLES = ' '
else:
    PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'


#-----------------------------------------------------------------------------
# Main functions and classes
#-----------------------------------------------------------------------------

def has_open_quotes(s):
    """Return whether a string has open quotes.

    This simply counts whether the number of quote characters of either type in
    the string is odd.

    Returns
    -------
    If there is an open quote, the quote character is returned.  Else, return
    False.
    """
    # We check " first, then ', so complex cases with nested quotes will get
    # the " to take precedence.
    if s.count('"') % 2:
        return '"'
    elif s.count("'") % 2:
        return "'"
    else:
        return False


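# Illustrative sketch (not part of the original module): how the helper above
# behaves on a couple of inputs.
#
#     >>> has_open_quotes('print("hello')
#     '"'
#     >>> has_open_quotes("a = 'done'")
#     False
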
def protect_filename(s):
    """Escape a string to protect certain characters."""

    return "".join([(ch in PROTECTABLES and '\\' + ch or ch)
                    for ch in s])

def expand_user(path):
    """Expand '~'-style usernames in strings.

    This is similar to :func:`os.path.expanduser`, but it computes and returns
    extra information that will be useful if the input was being used in
    computing completions, and you wish to return the completions with the
    original '~' instead of its expanded value.

    Parameters
    ----------
    path : str
      String to be expanded.  If no ~ is present, the output is the same as the
      input.

    Returns
    -------
    newpath : str
      Result of ~ expansion in the input path.
    tilde_expand : bool
      Whether any expansion was performed or not.
    tilde_val : str
      The value that ~ was replaced with.
    """
    # Default values
    tilde_expand = False
    tilde_val = ''
    newpath = path

    if path.startswith('~'):
        tilde_expand = True
        rest = len(path)-1
        newpath = os.path.expanduser(path)
        if rest:
            tilde_val = newpath[:-rest]
        else:
            tilde_val = newpath

    return newpath, tilde_expand, tilde_val


def compress_user(path, tilde_expand, tilde_val):
    """Does the opposite of expand_user, with its outputs.
    """
    if tilde_expand:
        return path.replace(tilde_val, '~')
    else:
        return path



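# Illustrative sketch (not part of the original module; assumes the running
# user's home directory is /home/ada):
#
#     >>> expand_user('~/data')
#     ('/home/ada/data', True, '/home/ada')
#     >>> compress_user('/home/ada/data/file.txt', True, '/home/ada')
#     '~/data/file.txt'
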
def completions_sorting_key(word):
    """key for sorting completions

    This does several things:

    - Lowercase all completions, so they are sorted alphabetically with
      upper and lower case words mingled
    - Demote any completions starting with underscores to the end
    - Insert any %magic and %%cellmagic completions in the alphabetical order
      by their name
    """
    # Case insensitive sort
    word = word.lower()

    prio1, prio2 = 0, 0

    if word.startswith('__'):
        prio1 = 2
    elif word.startswith('_'):
        prio1 = 1

    if word.startswith('%%'):
        # If there's another % in there, this is something else, so leave it alone
        if not "%" in word[2:]:
            word = word[2:]
            prio2 = 2
    elif word.startswith('%'):
        if not "%" in word[1:]:
            word = word[1:]
            prio2 = 1

    return prio1, word, prio2


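# Illustrative sketch (not part of the original module): the key tuple sorts
# magics in with their bare names, mingles upper/lower case, and pushes
# _ and __ names to the end.
#
#     >>> sorted(['_private', 'zeta', '%alias', 'Alpha'],
#     ...        key=completions_sorting_key)
#     ['%alias', 'Alpha', 'zeta', '_private']
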
@undoc
class Bunch(object): pass


DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
GREEDY_DELIMS = ' =\r\n'


class CompletionSplitter(object):
    """An object to split an input line in a manner similar to readline.

    By having our own implementation, we can expose readline-like completion in
    a uniform manner to all frontends.  This object only needs to be given the
    line of text to be split and the cursor position on said line, and it
    returns the 'word' to be completed on at the cursor after splitting the
    entire line.

    What characters are used as splitting delimiters can be controlled by
    setting the `delims` attribute (this is a property that internally
    automatically builds the necessary regular expression)"""

    # Private interface

    # A string of delimiter characters.  The default value makes sense for
    # IPython's most typical usage patterns.
    _delims = DELIMS

    # The expression (a normal string) to be compiled into a regular expression
    # for actual splitting.  We store it as an attribute mostly for ease of
    # debugging, since this type of code can be so tricky to debug.
    _delim_expr = None

    # The regular expression that does the actual splitting
    _delim_re = None

    def __init__(self, delims=None):
        delims = CompletionSplitter._delims if delims is None else delims
        self.delims = delims

    @property
    def delims(self):
        """Return the string of delimiter characters."""
        return self._delims

    @delims.setter
    def delims(self, delims):
        """Set the delimiters for line splitting."""
        expr = '[' + ''.join('\\'+ c for c in delims) + ']'
        self._delim_re = re.compile(expr)
        self._delims = delims
        self._delim_expr = expr

    def split_line(self, line, cursor_pos=None):
        """Split a line of text with a cursor at the given position.
        """
        l = line if cursor_pos is None else line[:cursor_pos]
        return self._delim_re.split(l)[-1]


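# Illustrative sketch (not part of the original module): the splitter returns
# only the fragment under the cursor, using DELIMS above ('.' is not a
# delimiter, so dotted names survive as one unit).
#
#     >>> sp = CompletionSplitter()
#     >>> sp.split_line('print(os.pa')
#     'os.pa'
#     >>> sp.split_line('a = os.path.jo', cursor_pos=8)
#     'os.p'
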
class Completer(Configurable):

    greedy = CBool(False, config=True,
        help="""Activate greedy completion

        This will enable completion on elements of lists, results of function calls, etc.,
        but can be unsafe because the code is actually evaluated on TAB.
        """
    )


    def __init__(self, namespace=None, global_namespace=None, **kwargs):
        """Create a new completer for the command line.

        Completer(namespace=ns,global_namespace=ns2) -> completer instance.

        If unspecified, the default namespace where completions are performed
        is __main__ (technically, __main__.__dict__). Namespaces should be
        given as dictionaries.

        An optional second namespace can be given.  This allows the completer
        to handle cases where both the local and global scopes need to be
        distinguished.

        Completer instances should be used as the completion mechanism of
        readline via the set_completer() call:

        readline.set_completer(Completer(my_namespace).complete)
        """

        # Don't bind to namespace quite yet, but flag whether the user wants a
        # specific namespace or to use __main__.__dict__. This will allow us
        # to bind to __main__.__dict__ at completion time, not now.
        if namespace is None:
            self.use_main_ns = 1
        else:
            self.use_main_ns = 0
            self.namespace = namespace

        # The global namespace, if given, can be bound directly
        if global_namespace is None:
            self.global_namespace = {}
        else:
            self.global_namespace = global_namespace

        super(Completer, self).__init__(**kwargs)

    def complete(self, text, state):
        """Return the next possible completion for 'text'.

        This is called successively with state == 0, 1, 2, ... until it
        returns None.  The completion should begin with 'text'.

        """
        if self.use_main_ns:
            self.namespace = __main__.__dict__

        if state == 0:
            if "." in text:
                self.matches = self.attr_matches(text)
            else:
                self.matches = self.global_matches(text)
        try:
            return self.matches[state]
        except IndexError:
            return None

    def global_matches(self, text):
        """Compute matches when text is a simple name.

        Return a list of all keywords, built-in functions and names currently
        defined in self.namespace or self.global_namespace that match.

        """
        #print 'Completer->global_matches, txt=%r' % text # dbg
        matches = []
        match_append = matches.append
        n = len(text)
        for lst in [keyword.kwlist,
                    builtin_mod.__dict__.keys(),
                    self.namespace.keys(),
                    self.global_namespace.keys()]:
            for word in lst:
                if word[:n] == text and word != "__builtins__":
                    match_append(word)
        return matches

    def attr_matches(self, text):
        """Compute matches when text contains a dot.

        Assuming the text is of the form NAME.NAME....[NAME], and is
        evaluatable in self.namespace or self.global_namespace, it will be
        evaluated and its attributes (as revealed by dir()) are used as
        possible completions.  (For class instances, class members are
        also considered.)

        WARNING: this can still invoke arbitrary C code, if an object
        with a __getattr__ hook is evaluated.

        """

        #io.rprint('Completer->attr_matches, txt=%r' % text) # dbg
        # Another option, seems to work great. Catches things like ''.<tab>
        m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)

        if m:
            expr, attr = m.group(1, 3)
        elif self.greedy:
            m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
            if not m2:
                return []
            expr, attr = m2.group(1,2)
        else:
            return []

        try:
            obj = eval(expr, self.namespace)
        except:
            try:
                obj = eval(expr, self.global_namespace)
            except:
                return []

        if self.limit_to__all__ and hasattr(obj, '__all__'):
            words = get__all__entries(obj)
        else:
            words = dir2(obj)

        try:
            words = generics.complete_object(obj, words)
        except TryNext:
            pass
        except Exception:
            # Silence errors from completion function
            #raise # dbg
            pass
        # Build match list to return
        n = len(attr)
        res = ["%s.%s" % (expr, w) for w in words if w[:n] == attr ]
        return res


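# Illustrative sketch (not part of the original module): the readline-style
# protocol above is driven by calling complete() with increasing `state`
# values until it returns None.
#
#     >>> c = Completer(namespace={'alpha': 1, 'albatross': 2})
#     >>> matches, state = [], 0
#     >>> while True:
#     ...     m = c.complete('alb', state)
#     ...     if m is None:
#     ...         break
#     ...     matches.append(m)
#     ...     state += 1
#     >>> matches
#     ['albatross']
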
def get__all__entries(obj):
    """returns the strings in the __all__ attribute"""
    try:
        words = getattr(obj, '__all__')
    except:
        return []

    return [w for w in words if isinstance(w, string_types)]


def match_dict_keys(keys, prefix, delims):
    """Used by dict_key_matches, matching the prefix to a list of keys"""
    if not prefix:
        return None, 0, [repr(k) for k in keys
                         if isinstance(k, (string_types, bytes))]
    quote_match = re.search('["\']', prefix)
    quote = quote_match.group()
    try:
        prefix_str = eval(prefix + quote, {})
    except Exception:
        return None, 0, []

    pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
    token_match = re.search(pattern, prefix, re.UNICODE)
    token_start = token_match.start()
    token_prefix = token_match.group()

    # TODO: support bytes in Py3k
    matched = []
    for key in keys:
        try:
            if not key.startswith(prefix_str):
                continue
        except (AttributeError, TypeError, UnicodeError):
            # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
            continue

        # reformat remainder of key to begin with prefix
        rem = key[len(prefix_str):]
        # force repr wrapped in '
        rem_repr = repr(rem + '"')
        if rem_repr.startswith('u') and prefix[0] not in 'uU':
            # Found key is unicode, but prefix is Py2 string.
            # Therefore attempt to interpret key as string.
            try:
                rem_repr = repr(rem.encode('ascii') + '"')
            except UnicodeEncodeError:
                continue

        rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
        if quote == '"':
            # The entered prefix is quoted with ",
            # but the match is quoted with '.
            # A contained " hence needs escaping for comparison:
            rem_repr = rem_repr.replace('"', '\\"')

        # then reinsert prefix from start of token
        matched.append('%s%s' % (token_prefix, rem_repr))
    return quote, token_start, matched


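# Illustrative sketch (not part of the original module): the helper returns
# the quote character in use, the offset where the key token starts inside
# `prefix`, and the matching keys re-quoted to fit what was already typed.
#
#     >>> match_dict_keys(['apple', 'apricot', 'banana'], "'ap", DELIMS)
#     ("'", 1, ['apple', 'apricot'])
#     >>> match_dict_keys(['apple', 'banana'], '', DELIMS)
#     (None, 0, ["'apple'", "'banana'"])
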
def _safe_isinstance(obj, module, class_name):
    """Checks if obj is an instance of module.class_name if loaded
    """
    return (module in sys.modules and
            isinstance(obj, getattr(__import__(module), class_name)))

def _safe_really_hasattr(obj, name):
    """Checks that an object genuinely has a given attribute.

    Some objects claim to have any attribute that's requested, to act as a lazy
    proxy for something else. We want to catch these cases and ignore their
    claim to have the attribute we're interested in.
    """
    if safe_hasattr(obj, '_ipy_proxy_check_dont_define_this_'):
        # If it claims this exists, don't trust it
        return False

    return safe_hasattr(obj, name)


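# Illustrative sketch (not part of the original module): a hypothetical lazy
# proxy that claims every attribute exists is rejected by the check above.
#
#     class LazyProxy(object):
#         def __getattr__(self, name):
#             return lambda *a, **kw: None   # pretends to have everything
#
#     safe_hasattr(LazyProxy(), 'keys')            # -> True (can't be trusted)
#     _safe_really_hasattr(LazyProxy(), 'keys')    # -> False
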
def back_unicode_name_matches(text):
    u"""Match unicode characters back to unicode name

    This does β˜ƒ -> \\snowman

    Note that snowman is not a valid python3 combining character but will be expanded.
    Though it will not recombine back to the snowman character by the completion machinery.

    Neither will this back-complete standard sequences like \\n, \\b ...

    Used on Python 3 only.
    """
    if len(text)<2:
        return u'', ()
    maybe_slash = text[-2]
    if maybe_slash != '\\':
        return u'', ()

    char = text[-1]
    # no expand on quote for completion in strings.
    # nor backcomplete standard ascii keys
    if char in string.ascii_letters or char in ['"',"'"]:
        return u'', ()
    try :
        unic = unicodedata.name(char)
        return '\\'+char,['\\'+unic]
    except KeyError as e:
        pass
    return u'', ()

def back_latex_name_matches(text):
    u"""Match latex characters back to unicode name

    This does √ -> \\sqrt

    Used on Python 3 only.
    """
    if len(text)<2:
        return u'', ()
    maybe_slash = text[-2]
    if maybe_slash != '\\':
        return u'', ()


    char = text[-1]
    # no expand on quote for completion in strings.
    # nor backcomplete standard ascii keys
    if char in string.ascii_letters or char in ['"',"'"]:
        return u'', ()
    try :
        latex = reverse_latex_symbol[char]
        # '\\' replace the \ as well
        return '\\'+char,[latex]
    except KeyError as e:
        pass
    return u'', ()


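# Illustrative sketch (not part of the original module): both helpers look at
# the last two characters of the text and return (matched_fragment, matches).
#
#     >>> back_unicode_name_matches(u'print("\\β˜ƒ')
#     ('\\β˜ƒ', ['\\SNOWMAN'])
#     >>> back_unicode_name_matches(u'no backslash here')
#     ('', ())
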
class IPCompleter(Completer):
    """Extension of the completer class with IPython-specific features"""

    def _greedy_changed(self, name, old, new):
        """update the splitter and readline delims when greedy is changed"""
        if new:
            self.splitter.delims = GREEDY_DELIMS
        else:
            self.splitter.delims = DELIMS

        if self.readline:
            self.readline.set_completer_delims(self.splitter.delims)

    merge_completions = CBool(True, config=True,
        help="""Whether to merge completion results into a single list

        If False, only the completion results from the first non-empty
        completer will be returned.
        """
    )
    omit__names = Enum((0,1,2), default_value=2, config=True,
        help="""Instruct the completer to omit private method names

        Specifically, when completing on ``object.<tab>``.

        When 2 [default]: all names that start with '_' will be excluded.

        When 1: all 'magic' names (``__foo__``) will be excluded.

        When 0: nothing will be excluded.
        """
    )
    limit_to__all__ = CBool(default_value=False, config=True,
        help="""Instruct the completer to use __all__ for the completion

        Specifically, when completing on ``object.<tab>``.

        When True: only those names in obj.__all__ will be included.

        When False [default]: the __all__ attribute is ignored
        """
    )

    def __init__(self, shell=None, namespace=None, global_namespace=None,
                 use_readline=True, config=None, **kwargs):
        """IPCompleter() -> completer

        Return a completer object suitable for use by the readline library
        via readline.set_completer().

        Inputs:

        - shell: a pointer to the ipython shell itself.  This is needed
          because this completer knows about magic functions, and those can
          only be accessed via the ipython instance.

        - namespace: an optional dict where completions are performed.

        - global_namespace: secondary optional dict for completions, to
          handle cases (such as IPython embedded inside functions) where
          both Python scopes are visible.

        use_readline : bool, optional
          If true, use the readline library.  This completer can still function
          without readline, though in that case callers must provide some extra
          information on each call about the current line."""

        self.magic_escape = ESC_MAGIC
        self.splitter = CompletionSplitter()

        # Readline configuration, only used by the rlcompleter method.
        if use_readline:
            # We store the right version of readline so that later code
            import IPython.utils.rlineimpl as readline
            self.readline = readline
        else:
            self.readline = None

        # _greedy_changed() depends on splitter and readline being defined:
        Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
                           config=config, **kwargs)

        # List where completion matches will be stored
        self.matches = []
        self.shell = shell
        # Regexp to split filenames with spaces in them
        self.space_name_re = re.compile(r'([^\\] )')
        # Hold a local ref. to glob.glob for speed
        self.glob = glob.glob

        # Determine if we are running on 'dumb' terminals, like (X)Emacs
        # buffers, to avoid completion problems.
        term = os.environ.get('TERM','xterm')
        self.dumb_terminal = term in ['dumb','emacs']

        # Special handling of backslashes needed in win32 platforms
        if sys.platform == "win32":
            self.clean_glob = self._clean_glob_win32
        else:
            self.clean_glob = self._clean_glob

        #regexp to parse docstring for function signature
        self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
        self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
        #use this if positional argument name is also needed
        #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')

        # All active matcher routines for completion
        self.matchers = [self.python_matches,
                         self.file_matches,
                         self.magic_matches,
                         self.python_func_kw_matches,
                         self.dict_key_matches,
                         ]

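    # Illustrative sketch (not part of the original module): each entry in
    # self.matchers is a bound method taking the text fragment under the
    # cursor and returning a list of candidate strings.  A frontend-side
    # combination loop, assuming `ipc` is a fully configured IPCompleter
    # whose line state (e.g. text_until_cursor) has already been set, would
    # look roughly like:
    #
    #     matches = []
    #     for matcher in ipc.matchers:
    #         matches.extend(matcher('os.pa'))
    #
    # merge_completions (defined above) controls whether results from all
    # matchers are merged like this or only the first non-empty list is kept.
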
    def all_completions(self, text):
        """
        Wrapper around the complete method for the benefit of emacs
        and pydb.
        """
        return self.complete(text)[1]

    def _clean_glob(self,text):
        return self.glob("%s*" % text)

    def _clean_glob_win32(self,text):
        return [f.replace("\\","/")
                for f in self.glob("%s*" % text)]

    def file_matches(self, text):
        """Match filenames, expanding ~USER type strings.

        Most of the seemingly convoluted logic in this completer is an
        attempt to handle filenames with spaces in them.  And yet it's not
        quite perfect, because Python's readline doesn't expose all of the
        GNU readline details needed for this to be done correctly.

        For a filename with a space in it, the printed completions will be
        only the parts after what's already been typed (instead of the
        full completions, as is normally done).  I don't think with the
        current (as of Python 2.3) Python readline it's possible to do
        better."""

        #io.rprint('Completer->file_matches: <%r>' % text) # dbg

        # chars that require escaping with backslash - i.e. chars
        # that readline treats incorrectly as delimiters, but we
        # don't want to treat as delimiters in filename matching
        # when escaped with backslash
        if text.startswith('!'):
            text = text[1:]
            text_prefix = '!'
        else:
            text_prefix = ''

        text_until_cursor = self.text_until_cursor
        # track strings with open quotes
        open_quotes = has_open_quotes(text_until_cursor)

        if '(' in text_until_cursor or '[' in text_until_cursor:
            lsplit = text
        else:
            try:
                # arg_split ~ shlex.split, but with unicode bugs fixed by us
                lsplit = arg_split(text_until_cursor)[-1]
            except ValueError:
                # typically an unmatched ", or backslash without escaped char.
                if open_quotes:
                    lsplit = text_until_cursor.split(open_quotes)[-1]
                else:
                    return []
            except IndexError:
                # tab pressed on empty line
                lsplit = ""

        if not open_quotes and lsplit != protect_filename(lsplit):
            # if protectables are found, do matching on the whole escaped name
            has_protectables = True
            text0,text = text,lsplit
        else:
            has_protectables = False
            text = os.path.expanduser(text)

        if text == "":
            return [text_prefix + protect_filename(f) for f in self.glob("*")]

        # Compute the matches from the filesystem
        m0 = self.clean_glob(text.replace('\\',''))

        if has_protectables:
            # If we had protectables, we need to revert our changes to the
            # beginning of filename so that we don't double-write the part
            # of the filename we have so far
            len_lsplit = len(lsplit)
            matches = [text_prefix + text0 +
                       protect_filename(f[len_lsplit:]) for f in m0]
        else:
            if open_quotes:
                # if we have a string with an open quote, we don't need to
                # protect the names at all (and we _shouldn't_, as it
                # would cause bugs when the filesystem call is made).
                matches = m0
            else:
                matches = [text_prefix +
                           protect_filename(f) for f in m0]

        #io.rprint('mm', matches) # dbg

        # Mark directories in input list by appending '/' to their names.
        matches = [x+'/' if os.path.isdir(x) else x for x in matches]
        return matches

    def magic_matches(self, text):
        """Match magics"""
        #print 'Completer->magic_matches:',text,'lb',self.text_until_cursor # dbg
        # Get all shell magics now rather than statically, so magics loaded at
        # runtime show up too.
        lsm = self.shell.magics_manager.lsmagic()
        line_magics = lsm['line']
        cell_magics = lsm['cell']
        pre = self.magic_escape
        pre2 = pre+pre

        # Completion logic:
        # - user gives %%: only do cell magics
        # - user gives %: do both line and cell magics
        # - no prefix: do both
        # In other words, line magics are skipped if the user gives %% explicitly
        bare_text = text.lstrip(pre)
        comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
        if not text.startswith(pre2):
            comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
        return comp

    def python_matches(self,text):
        """Match attributes or global python names"""

        #io.rprint('Completer->python_matches, txt=%r' % text) # dbg
        if "." in text:
            try:
                matches = self.attr_matches(text)
                if text.endswith('.') and self.omit__names:
                    if self.omit__names == 1:
                        # true if txt is _not_ a __ name, false otherwise:
                        no__name = (lambda txt:
                                    re.match(r'.*\.__.*?__',txt) is None)
                    else:
                        # true if txt is _not_ a _ name, false otherwise:
                        no__name = (lambda txt:
                                    re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
                    matches = filter(no__name, matches)
            except NameError:
                # catches <undefined attributes>.<tab>
                matches = []
        else:
            matches = self.global_matches(text)

        return matches

    def _default_arguments_from_docstring(self, doc):
        """Parse the first line of docstring for call signature.

        Docstring should be of the form 'min(iterable[, key=func])\n'.
        It can also parse cython docstring of the form
        'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
        """
        if doc is None:
            return []

        # care only about the first line
        line = doc.lstrip().splitlines()[0]

        #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
        #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
        sig = self.docstring_sig_re.search(line)
        if sig is None:
            return []
        # iterable[, key=func]' -> ['iterable[' ,' key=func]']
        sig = sig.groups()[0].split(',')
        ret = []
        for s in sig:
            #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
            ret += self.docstring_kwd_re.findall(s)
        return ret

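    # Illustrative sketch (not part of the original module): applying the two
    # regexes above to the docstring forms mentioned in the method docstring.
    #
    #     self._default_arguments_from_docstring('min(iterable[, key=func])\n')
    #     # -> ['key']
    #     self._default_arguments_from_docstring(
    #         'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)')
    #     # -> ['ncall', 'resume', 'nsplit']
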
    def _default_arguments(self, obj):
        """Return the list of default arguments of obj if it is callable,
        or empty list otherwise."""
        call_obj = obj
        ret = []
        if inspect.isbuiltin(obj):
            pass
        elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
            if inspect.isclass(obj):
                # for cython embedsignature=True the constructor docstring
                # belongs to the object itself, not __init__
                ret += self._default_arguments_from_docstring(
                            getattr(obj, '__doc__', ''))
                # for classes, check for __init__,__new__
                call_obj = (getattr(obj, '__init__', None) or
                            getattr(obj, '__new__', None))
            # for all others, check if they are __call__able
            elif hasattr(obj, '__call__'):
                call_obj = obj.__call__
                ret += self._default_arguments_from_docstring(
                            getattr(call_obj, '__doc__', ''))

        if PY3:
            _keeps = (inspect.Parameter.KEYWORD_ONLY,
                      inspect.Parameter.POSITIONAL_OR_KEYWORD)
            signature = inspect.signature
        else:
            import IPython.utils.signatures
            _keeps = (IPython.utils.signatures.Parameter.KEYWORD_ONLY,
                      IPython.utils.signatures.Parameter.POSITIONAL_OR_KEYWORD)
            signature = IPython.utils.signatures.signature

        try:
            sig = signature(call_obj)
            ret.extend(k for k, v in sig.parameters.items() if
                       v.kind in _keeps)
        except ValueError:
            pass

        return list(set(ret))

873 def python_func_kw_matches(self,text):
873 def python_func_kw_matches(self,text):
874 """Match named parameters (kwargs) of the last open function"""
874 """Match named parameters (kwargs) of the last open function"""
875
875
876 if "." in text: # a parameter cannot be dotted
876 if "." in text: # a parameter cannot be dotted
877 return []
877 return []
878 try: regexp = self.__funcParamsRegex
878 try: regexp = self.__funcParamsRegex
879 except AttributeError:
879 except AttributeError:
880 regexp = self.__funcParamsRegex = re.compile(r'''
880 regexp = self.__funcParamsRegex = re.compile(r'''
881 '.*?(?<!\\)' | # single quoted strings or
881 '.*?(?<!\\)' | # single quoted strings or
882 ".*?(?<!\\)" | # double quoted strings or
882 ".*?(?<!\\)" | # double quoted strings or
883 \w+ | # identifier
883 \w+ | # identifier
884 \S # other characters
884 \S # other characters
885 ''', re.VERBOSE | re.DOTALL)
885 ''', re.VERBOSE | re.DOTALL)
886 # 1. find the nearest identifier that comes before an unclosed
886 # 1. find the nearest identifier that comes before an unclosed
887 # parenthesis before the cursor
887 # parenthesis before the cursor
888 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
888 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
889 tokens = regexp.findall(self.text_until_cursor)
889 tokens = regexp.findall(self.text_until_cursor)
890 tokens.reverse()
890 tokens.reverse()
891 iterTokens = iter(tokens); openPar = 0
891 iterTokens = iter(tokens); openPar = 0
892
892
893 for token in iterTokens:
893 for token in iterTokens:
894 if token == ')':
894 if token == ')':
895 openPar -= 1
895 openPar -= 1
896 elif token == '(':
896 elif token == '(':
897 openPar += 1
897 openPar += 1
898 if openPar > 0:
898 if openPar > 0:
899 # found the last unclosed parenthesis
899 # found the last unclosed parenthesis
900 break
900 break
901 else:
901 else:
902 return []
902 return []
903 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
903 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
904 ids = []
904 ids = []
905 isId = re.compile(r'\w+$').match
905 isId = re.compile(r'\w+$').match
906
906
907 while True:
907 while True:
908 try:
908 try:
909 ids.append(next(iterTokens))
909 ids.append(next(iterTokens))
910 if not isId(ids[-1]):
910 if not isId(ids[-1]):
911 ids.pop(); break
911 ids.pop(); break
912 if not next(iterTokens) == '.':
912 if not next(iterTokens) == '.':
913 break
913 break
914 except StopIteration:
914 except StopIteration:
915 break
915 break
916 # lookup the candidate callable matches either using global_matches
916 # lookup the candidate callable matches either using global_matches
917 # or attr_matches for dotted names
917 # or attr_matches for dotted names
918 if len(ids) == 1:
918 if len(ids) == 1:
919 callableMatches = self.global_matches(ids[0])
919 callableMatches = self.global_matches(ids[0])
920 else:
920 else:
921 callableMatches = self.attr_matches('.'.join(ids[::-1]))
921 callableMatches = self.attr_matches('.'.join(ids[::-1]))
922 argMatches = []
922 argMatches = []
923 for callableMatch in callableMatches:
923 for callableMatch in callableMatches:
924 try:
924 try:
925 namedArgs = self._default_arguments(eval(callableMatch,
925 namedArgs = self._default_arguments(eval(callableMatch,
926 self.namespace))
926 self.namespace))
927 except:
927 except:
928 continue
928 continue
929
929
930 for namedArg in namedArgs:
930 for namedArg in namedArgs:
931 if namedArg.startswith(text):
931 if namedArg.startswith(text):
932 argMatches.append("%s=" %namedArg)
932 argMatches.append("%s=" %namedArg)
933 return argMatches
933 return argMatches
934
934
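In behavioural terms, the matcher above is what turns an unfinished call into name= suggestions. A small usage sketch through the public complete() API (the function name and arguments are made up):

from IPython import get_ipython

ip = get_ipython()                      # None outside an IPython session
if ip is not None:
    ip.run_cell('def area(width=1, height=1): return width * height')
    # Completing right after 'h' inside the open call offers the keyword.
    text, matches = ip.Completer.complete(None, 'area(2, h')
    print('height=' in matches)         # True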
935 def dict_key_matches(self, text):
935 def dict_key_matches(self, text):
936 "Match string keys in a dictionary, after e.g. 'foo[' "
936 "Match string keys in a dictionary, after e.g. 'foo[' "
937 def get_keys(obj):
937 def get_keys(obj):
938 # Objects can define their own completions by defining an
938 # Objects can define their own completions by defining an
939 # _ipy_key_completions_() method.
939 # _ipython_key_completions_() method.
940 if _safe_really_hasattr(obj, '_ipy_key_completions_'):
940 if _safe_really_hasattr(obj, '_ipython_key_completions_'):
941 return obj._ipy_key_completions_()
941 return obj._ipython_key_completions_()
942
942
943 # Special case some common in-memory dict-like types
943 # Special case some common in-memory dict-like types
944 if isinstance(obj, dict) or\
944 if isinstance(obj, dict) or\
945 _safe_isinstance(obj, 'pandas', 'DataFrame'):
945 _safe_isinstance(obj, 'pandas', 'DataFrame'):
946 try:
946 try:
947 return list(obj.keys())
947 return list(obj.keys())
948 except Exception:
948 except Exception:
949 return []
949 return []
950 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
950 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
951 _safe_isinstance(obj, 'numpy', 'void'):
951 _safe_isinstance(obj, 'numpy', 'void'):
952 return obj.dtype.names or []
952 return obj.dtype.names or []
953 return []
953 return []
954
954
955 try:
955 try:
956 regexps = self.__dict_key_regexps
956 regexps = self.__dict_key_regexps
957 except AttributeError:
957 except AttributeError:
958 dict_key_re_fmt = r'''(?x)
958 dict_key_re_fmt = r'''(?x)
959 ( # match dict-referring expression wrt greedy setting
959 ( # match dict-referring expression wrt greedy setting
960 %s
960 %s
961 )
961 )
962 \[ # open bracket
962 \[ # open bracket
963 \s* # and optional whitespace
963 \s* # and optional whitespace
964 ([uUbB]? # string prefix (r not handled)
964 ([uUbB]? # string prefix (r not handled)
965 (?: # unclosed string
965 (?: # unclosed string
966 '(?:[^']|(?<!\\)\\')*
966 '(?:[^']|(?<!\\)\\')*
967 |
967 |
968 "(?:[^"]|(?<!\\)\\")*
968 "(?:[^"]|(?<!\\)\\")*
969 )
969 )
970 )?
970 )?
971 $
971 $
972 '''
972 '''
973 regexps = self.__dict_key_regexps = {
973 regexps = self.__dict_key_regexps = {
974 False: re.compile(dict_key_re_fmt % '''
974 False: re.compile(dict_key_re_fmt % '''
975 # identifiers separated by .
975 # identifiers separated by .
976 (?!\d)\w+
976 (?!\d)\w+
977 (?:\.(?!\d)\w+)*
977 (?:\.(?!\d)\w+)*
978 '''),
978 '''),
979 True: re.compile(dict_key_re_fmt % '''
979 True: re.compile(dict_key_re_fmt % '''
980 .+
980 .+
981 ''')
981 ''')
982 }
982 }
983
983
984 match = regexps[self.greedy].search(self.text_until_cursor)
984 match = regexps[self.greedy].search(self.text_until_cursor)
985 if match is None:
985 if match is None:
986 return []
986 return []
987
987
988 expr, prefix = match.groups()
988 expr, prefix = match.groups()
989 try:
989 try:
990 obj = eval(expr, self.namespace)
990 obj = eval(expr, self.namespace)
991 except Exception:
991 except Exception:
992 try:
992 try:
993 obj = eval(expr, self.global_namespace)
993 obj = eval(expr, self.global_namespace)
994 except Exception:
994 except Exception:
995 return []
995 return []
996
996
997 keys = get_keys(obj)
997 keys = get_keys(obj)
998 if not keys:
998 if not keys:
999 return keys
999 return keys
1000 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
1000 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
1001 if not matches:
1001 if not matches:
1002 return matches
1002 return matches
1003
1003
1004 # get the cursor position of
1004 # get the cursor position of
1005 # - the text being completed
1005 # - the text being completed
1006 # - the start of the key text
1006 # - the start of the key text
1007 # - the start of the completion
1007 # - the start of the completion
1008 text_start = len(self.text_until_cursor) - len(text)
1008 text_start = len(self.text_until_cursor) - len(text)
1009 if prefix:
1009 if prefix:
1010 key_start = match.start(2)
1010 key_start = match.start(2)
1011 completion_start = key_start + token_offset
1011 completion_start = key_start + token_offset
1012 else:
1012 else:
1013 key_start = completion_start = match.end()
1013 key_start = completion_start = match.end()
1014
1014
1015 # grab the leading prefix, to make sure all completions start with `text`
1015 # grab the leading prefix, to make sure all completions start with `text`
1016 if text_start > key_start:
1016 if text_start > key_start:
1017 leading = ''
1017 leading = ''
1018 else:
1018 else:
1019 leading = text[text_start:completion_start]
1019 leading = text[text_start:completion_start]
1020
1020
1021 # the index of the `[` character
1021 # the index of the `[` character
1022 bracket_idx = match.end(1)
1022 bracket_idx = match.end(1)
1023
1023
1024 # append closing quote and bracket as appropriate
1024 # append closing quote and bracket as appropriate
1025 # this is *not* appropriate if the opening quote or bracket is outside
1025 # this is *not* appropriate if the opening quote or bracket is outside
1026 # the text given to this method
1026 # the text given to this method
1027 suf = ''
1027 suf = ''
1028 continuation = self.line_buffer[len(self.text_until_cursor):]
1028 continuation = self.line_buffer[len(self.text_until_cursor):]
1029 if key_start > text_start and closing_quote:
1029 if key_start > text_start and closing_quote:
1030 # quotes were opened inside text, maybe close them
1030 # quotes were opened inside text, maybe close them
1031 if continuation.startswith(closing_quote):
1031 if continuation.startswith(closing_quote):
1032 continuation = continuation[len(closing_quote):]
1032 continuation = continuation[len(closing_quote):]
1033 else:
1033 else:
1034 suf += closing_quote
1034 suf += closing_quote
1035 if bracket_idx > text_start:
1035 if bracket_idx > text_start:
1036 # brackets were opened inside text, maybe close them
1036 # brackets were opened inside text, maybe close them
1037 if not continuation.startswith(']'):
1037 if not continuation.startswith(']'):
1038 suf += ']'
1038 suf += ']'
1039
1039
1040 return [leading + k + suf for k in matches]
1040 return [leading + k + suf for k in matches]
1041
1041
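As a usage sketch of the hook that get_keys() checks first: any object can advertise its own bracket-completion keys by defining _ipython_key_completions_. The class below is hypothetical, not part of IPython.

class Config(object):
    """Dict-like container whose keys complete after cfg[ in IPython."""
    def __init__(self):
        self._data = {'host': 'localhost', 'port': 8080}

    def __getitem__(self, key):
        return self._data[key]

    def _ipython_key_completions_(self):
        # Called by dict_key_matches() via get_keys() above.
        return list(self._data)

cfg = Config()
# In an IPython session, typing  cfg['  and hitting TAB now offers
# 'host' and 'port'.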
1042 def unicode_name_matches(self, text):
1042 def unicode_name_matches(self, text):
1043 u"""Match Latex-like syntax for unicode characters base
1043 u"""Match Latex-like syntax for unicode characters base
1044 on the name of the character.
1044 on the name of the character.
1045
1045
1046 This does \\GREEK SMALL LETTER ETA -> η
1046 This does \\GREEK SMALL LETTER ETA -> η
1047
1047
1048 Works only on valid Python 3 identifiers, or on combining characters that
1048 Works only on valid Python 3 identifiers, or on combining characters that
1049 will combine to form a valid identifier.
1049 will combine to form a valid identifier.
1050
1050
1051 Used on Python 3 only.
1051 Used on Python 3 only.
1052 """
1052 """
1053 slashpos = text.rfind('\\')
1053 slashpos = text.rfind('\\')
1054 if slashpos > -1:
1054 if slashpos > -1:
1055 s = text[slashpos+1:]
1055 s = text[slashpos+1:]
1056 try :
1056 try :
1057 unic = unicodedata.lookup(s)
1057 unic = unicodedata.lookup(s)
1058 # allow combining chars
1058 # allow combining chars
1059 if ('a'+unic).isidentifier():
1059 if ('a'+unic).isidentifier():
1060 return '\\'+s,[unic]
1060 return '\\'+s,[unic]
1061 except KeyError as e:
1061 except KeyError as e:
1062 pass
1062 pass
1063 return u'', []
1063 return u'', []
1064
1064
1065
1065
1066
1066
1067
1067
1068 def latex_matches(self, text):
1068 def latex_matches(self, text):
1069 u"""Match Latex syntax for unicode characters.
1069 u"""Match Latex syntax for unicode characters.
1070
1070
1071 This does both \\alp -> \\alpha and \\alpha -> α
1071 This does both \\alp -> \\alpha and \\alpha -> α
1072
1072
1073 Used on Python 3 only.
1073 Used on Python 3 only.
1074 """
1074 """
1075 slashpos = text.rfind('\\')
1075 slashpos = text.rfind('\\')
1076 if slashpos > -1:
1076 if slashpos > -1:
1077 s = text[slashpos:]
1077 s = text[slashpos:]
1078 if s in latex_symbols:
1078 if s in latex_symbols:
1079 # Try to complete a full latex symbol to unicode
1079 # Try to complete a full latex symbol to unicode
1080 # \\alpha -> α
1080 # \\alpha -> α
1081 return s, [latex_symbols[s]]
1081 return s, [latex_symbols[s]]
1082 else:
1082 else:
1083 # If a user has partially typed a latex symbol, give them
1083 # If a user has partially typed a latex symbol, give them
1084 # a full list of options \al -> [\aleph, \alpha]
1084 # a full list of options \al -> [\aleph, \alpha]
1085 matches = [k for k in latex_symbols if k.startswith(s)]
1085 matches = [k for k in latex_symbols if k.startswith(s)]
1086 return s, matches
1086 return s, matches
1087 return u'', []
1087 return u'', []
1088
1088
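A quick interactive sketch of the two matchers above; the exact match lists depend on the installed latex_symbols table, so the commented results are indicative only.

from IPython import get_ipython

ip = get_ipython()                      # None outside an IPython session
if ip is not None:
    comp = ip.Completer
    print(comp.latex_matches('\\alpha'))   # ('\\alpha', ['α'])
    print(comp.latex_matches('\\al'))      # ('\\al', ['\\aleph', '\\alpha', ...])
    print(comp.unicode_name_matches('\\GREEK SMALL LETTER ETA'))
    # ('\\GREEK SMALL LETTER ETA', ['η'])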
1089 def dispatch_custom_completer(self, text):
1089 def dispatch_custom_completer(self, text):
1090 #io.rprint("Custom! '%s' %s" % (text, self.custom_completers)) # dbg
1090 #io.rprint("Custom! '%s' %s" % (text, self.custom_completers)) # dbg
1091 line = self.line_buffer
1091 line = self.line_buffer
1092 if not line.strip():
1092 if not line.strip():
1093 return None
1093 return None
1094
1094
1095 # Create a little structure to pass all the relevant information about
1095 # Create a little structure to pass all the relevant information about
1096 # the current completion to any custom completer.
1096 # the current completion to any custom completer.
1097 event = Bunch()
1097 event = Bunch()
1098 event.line = line
1098 event.line = line
1099 event.symbol = text
1099 event.symbol = text
1100 cmd = line.split(None,1)[0]
1100 cmd = line.split(None,1)[0]
1101 event.command = cmd
1101 event.command = cmd
1102 event.text_until_cursor = self.text_until_cursor
1102 event.text_until_cursor = self.text_until_cursor
1103
1103
1104 #print "\ncustom:{%s]\n" % event # dbg
1104 #print "\ncustom:{%s]\n" % event # dbg
1105
1105
1106 # for foo etc, try also to find completer for %foo
1106 # for foo etc, try also to find completer for %foo
1107 if not cmd.startswith(self.magic_escape):
1107 if not cmd.startswith(self.magic_escape):
1108 try_magic = self.custom_completers.s_matches(
1108 try_magic = self.custom_completers.s_matches(
1109 self.magic_escape + cmd)
1109 self.magic_escape + cmd)
1110 else:
1110 else:
1111 try_magic = []
1111 try_magic = []
1112
1112
1113 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1113 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1114 try_magic,
1114 try_magic,
1115 self.custom_completers.flat_matches(self.text_until_cursor)):
1115 self.custom_completers.flat_matches(self.text_until_cursor)):
1116 #print "try",c # dbg
1116 #print "try",c # dbg
1117 try:
1117 try:
1118 res = c(event)
1118 res = c(event)
1119 if res:
1119 if res:
1120 # first, try case sensitive match
1120 # first, try case sensitive match
1121 withcase = [r for r in res if r.startswith(text)]
1121 withcase = [r for r in res if r.startswith(text)]
1122 if withcase:
1122 if withcase:
1123 return withcase
1123 return withcase
1124 # if none, then case insensitive ones are ok too
1124 # if none, then case insensitive ones are ok too
1125 text_low = text.lower()
1125 text_low = text.lower()
1126 return [r for r in res if r.lower().startswith(text_low)]
1126 return [r for r in res if r.lower().startswith(text_low)]
1127 except TryNext:
1127 except TryNext:
1128 pass
1128 pass
1129
1129
1130 return None
1130 return None
1131
1131
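The custom completers consulted above are normally registered through the shell's complete_command hook; a short sketch (the 'apt' command and its option list are only an example):

from IPython import get_ipython

def apt_completer(self, event):
    # event.symbol is the token being completed, event.line the whole line.
    return ['update', 'upgrade', 'install', 'remove']

ip = get_ipython()
if ip is not None:
    # Offer these completions whenever the line starts with 'apt'.
    ip.set_hook('complete_command', apt_completer, str_key='apt')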
1132 def complete(self, text=None, line_buffer=None, cursor_pos=None):
1132 def complete(self, text=None, line_buffer=None, cursor_pos=None):
1133 """Find completions for the given text and line context.
1133 """Find completions for the given text and line context.
1134
1134
1135 Note that both the text and the line_buffer are optional, but at least
1135 Note that both the text and the line_buffer are optional, but at least
1136 one of them must be given.
1136 one of them must be given.
1137
1137
1138 Parameters
1138 Parameters
1139 ----------
1139 ----------
1140 text : string, optional
1140 text : string, optional
1141 Text to perform the completion on. If not given, the line buffer
1141 Text to perform the completion on. If not given, the line buffer
1142 is split using the instance's CompletionSplitter object.
1142 is split using the instance's CompletionSplitter object.
1143
1143
1144 line_buffer : string, optional
1144 line_buffer : string, optional
1145 If not given, the completer attempts to obtain the current line
1145 If not given, the completer attempts to obtain the current line
1146 buffer via readline. This keyword allows clients that request
1146 buffer via readline. This keyword allows clients that request
1147 text completions in non-readline contexts to inform
1147 text completions in non-readline contexts to inform
1148 the completer of the entire text.
1148 the completer of the entire text.
1149
1149
1150 cursor_pos : int, optional
1150 cursor_pos : int, optional
1151 Index of the cursor in the full line buffer. Should be provided by
1151 Index of the cursor in the full line buffer. Should be provided by
1152 remote frontends where kernel has no access to frontend state.
1152 remote frontends where kernel has no access to frontend state.
1153
1153
1154 Returns
1154 Returns
1155 -------
1155 -------
1156 text : str
1156 text : str
1157 Text that was actually used in the completion.
1157 Text that was actually used in the completion.
1158
1158
1159 matches : list
1159 matches : list
1160 A list of completion matches.
1160 A list of completion matches.
1161 """
1161 """
1162 # io.rprint('\nCOMP1 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1162 # io.rprint('\nCOMP1 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1163
1163
1164 # if the cursor position isn't given, the only sane assumption we can
1164 # if the cursor position isn't given, the only sane assumption we can
1165 # make is that it's at the end of the line (the common case)
1165 # make is that it's at the end of the line (the common case)
1166 if cursor_pos is None:
1166 if cursor_pos is None:
1167 cursor_pos = len(line_buffer) if text is None else len(text)
1167 cursor_pos = len(line_buffer) if text is None else len(text)
1168
1168
1169 if PY3:
1169 if PY3:
1170
1170
1171 base_text = text if not line_buffer else line_buffer[:cursor_pos]
1171 base_text = text if not line_buffer else line_buffer[:cursor_pos]
1172 latex_text, latex_matches = self.latex_matches(base_text)
1172 latex_text, latex_matches = self.latex_matches(base_text)
1173 if latex_matches:
1173 if latex_matches:
1174 return latex_text, latex_matches
1174 return latex_text, latex_matches
1175 name_text = ''
1175 name_text = ''
1176 name_matches = []
1176 name_matches = []
1177 for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
1177 for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
1178 name_text, name_matches = meth(base_text)
1178 name_text, name_matches = meth(base_text)
1179 if name_text:
1179 if name_text:
1180 return name_text, name_matches
1180 return name_text, name_matches
1181
1181
1182 # if text is either None or an empty string, rely on the line buffer
1182 # if text is either None or an empty string, rely on the line buffer
1183 if not text:
1183 if not text:
1184 text = self.splitter.split_line(line_buffer, cursor_pos)
1184 text = self.splitter.split_line(line_buffer, cursor_pos)
1185
1185
1186 # If no line buffer is given, assume the input text is all there was
1186 # If no line buffer is given, assume the input text is all there was
1187 if line_buffer is None:
1187 if line_buffer is None:
1188 line_buffer = text
1188 line_buffer = text
1189
1189
1190 self.line_buffer = line_buffer
1190 self.line_buffer = line_buffer
1191 self.text_until_cursor = self.line_buffer[:cursor_pos]
1191 self.text_until_cursor = self.line_buffer[:cursor_pos]
1192 # io.rprint('COMP2 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1192 # io.rprint('COMP2 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1193
1193
1194 # Start with a clean slate of completions
1194 # Start with a clean slate of completions
1195 self.matches[:] = []
1195 self.matches[:] = []
1196 custom_res = self.dispatch_custom_completer(text)
1196 custom_res = self.dispatch_custom_completer(text)
1197 if custom_res is not None:
1197 if custom_res is not None:
1198 # did custom completers produce something?
1198 # did custom completers produce something?
1199 self.matches = custom_res
1199 self.matches = custom_res
1200 else:
1200 else:
1201 # Extend the list of completions with the results of each
1201 # Extend the list of completions with the results of each
1202 # matcher, so we return results to the user from all
1202 # matcher, so we return results to the user from all
1203 # namespaces.
1203 # namespaces.
1204 if self.merge_completions:
1204 if self.merge_completions:
1205 self.matches = []
1205 self.matches = []
1206 for matcher in self.matchers:
1206 for matcher in self.matchers:
1207 try:
1207 try:
1208 self.matches.extend(matcher(text))
1208 self.matches.extend(matcher(text))
1209 except:
1209 except:
1210 # Show the ugly traceback if the matcher causes an
1210 # Show the ugly traceback if the matcher causes an
1211 # exception, but do NOT crash the kernel!
1211 # exception, but do NOT crash the kernel!
1212 sys.excepthook(*sys.exc_info())
1212 sys.excepthook(*sys.exc_info())
1213 else:
1213 else:
1214 for matcher in self.matchers:
1214 for matcher in self.matchers:
1215 self.matches = matcher(text)
1215 self.matches = matcher(text)
1216 if self.matches:
1216 if self.matches:
1217 break
1217 break
1218 # FIXME: we should extend our api to return a dict with completions for
1218 # FIXME: we should extend our api to return a dict with completions for
1219 # different types of objects. The rlcomplete() method could then
1219 # different types of objects. The rlcomplete() method could then
1220 # simply collapse the dict into a list for readline, but we'd have
1220 # simply collapse the dict into a list for readline, but we'd have
1221 # richer completion semantics in other environments.
1221 # richer completion semantics in other environments.
1222
1222
1223 self.matches = sorted(set(self.matches), key=completions_sorting_key)
1223 self.matches = sorted(set(self.matches), key=completions_sorting_key)
1224
1224
1225 #io.rprint('COMP TEXT, MATCHES: %r, %r' % (text, self.matches)) # dbg
1225 #io.rprint('COMP TEXT, MATCHES: %r, %r' % (text, self.matches)) # dbg
1226 return text, self.matches
1226 return text, self.matches
1227
1227
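A short usage sketch of complete() itself; what comes back depends on the rest of the namespace.

from IPython import get_ipython

ip = get_ipython()
if ip is not None:
    ip.user_ns['my_value'] = 42
    # Complete as if the cursor sat at the end of the line 'x = my_va'.
    text, matches = ip.Completer.complete(line_buffer='x = my_va',
                                          cursor_pos=len('x = my_va'))
    print(text)                    # 'my_va'
    print('my_value' in matches)   # True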
1228 def rlcomplete(self, text, state):
1228 def rlcomplete(self, text, state):
1229 """Return the state-th possible completion for 'text'.
1229 """Return the state-th possible completion for 'text'.
1230
1230
1231 This is called successively with state == 0, 1, 2, ... until it
1231 This is called successively with state == 0, 1, 2, ... until it
1232 returns None. The completion should begin with 'text'.
1232 returns None. The completion should begin with 'text'.
1233
1233
1234 Parameters
1234 Parameters
1235 ----------
1235 ----------
1236 text : string
1236 text : string
1237 Text to perform the completion on.
1237 Text to perform the completion on.
1238
1238
1239 state : int
1239 state : int
1240 Counter used by readline.
1240 Counter used by readline.
1241 """
1241 """
1242 if state==0:
1242 if state==0:
1243
1243
1244 self.line_buffer = line_buffer = self.readline.get_line_buffer()
1244 self.line_buffer = line_buffer = self.readline.get_line_buffer()
1245 cursor_pos = self.readline.get_endidx()
1245 cursor_pos = self.readline.get_endidx()
1246
1246
1247 #io.rprint("\nRLCOMPLETE: %r %r %r" %
1247 #io.rprint("\nRLCOMPLETE: %r %r %r" %
1248 # (text, line_buffer, cursor_pos) ) # dbg
1248 # (text, line_buffer, cursor_pos) ) # dbg
1249
1249
1250 # if there is only a tab on a line with only whitespace, instead of
1250 # if there is only a tab on a line with only whitespace, instead of
1251 # the mostly useless 'do you want to see all million completions'
1251 # the mostly useless 'do you want to see all million completions'
1252 # message, just do the right thing and give the user his tab!
1252 # message, just do the right thing and give the user his tab!
1253 # Incidentally, this enables pasting of tabbed text from an editor
1253 # Incidentally, this enables pasting of tabbed text from an editor
1254 # (as long as autoindent is off).
1254 # (as long as autoindent is off).
1255
1255
1256 # It should be noted that at least pyreadline still shows file
1256 # It should be noted that at least pyreadline still shows file
1257 # completions - is there a way around it?
1257 # completions - is there a way around it?
1258
1258
1259 # don't apply this on 'dumb' terminals, such as emacs buffers, so
1259 # don't apply this on 'dumb' terminals, such as emacs buffers, so
1260 # we don't interfere with their own tab-completion mechanism.
1260 # we don't interfere with their own tab-completion mechanism.
1261 if not (self.dumb_terminal or line_buffer.strip()):
1261 if not (self.dumb_terminal or line_buffer.strip()):
1262 self.readline.insert_text('\t')
1262 self.readline.insert_text('\t')
1263 sys.stdout.flush()
1263 sys.stdout.flush()
1264 return None
1264 return None
1265
1265
1266 # Note: debugging exceptions that may occur in completion is very
1266 # Note: debugging exceptions that may occur in completion is very
1267 # tricky, because readline unconditionally silences them. So if
1267 # tricky, because readline unconditionally silences them. So if
1268 # during development you suspect a bug in the completion code, turn
1268 # during development you suspect a bug in the completion code, turn
1269 # this flag on temporarily by uncommenting the second form (don't
1269 # this flag on temporarily by uncommenting the second form (don't
1270 # flip the value in the first line, as the '# dbg' marker can be
1270 # flip the value in the first line, as the '# dbg' marker can be
1271 # automatically detected and is used elsewhere).
1271 # automatically detected and is used elsewhere).
1272 DEBUG = False
1272 DEBUG = False
1273 #DEBUG = True # dbg
1273 #DEBUG = True # dbg
1274 if DEBUG:
1274 if DEBUG:
1275 try:
1275 try:
1276 self.complete(text, line_buffer, cursor_pos)
1276 self.complete(text, line_buffer, cursor_pos)
1277 except:
1277 except:
1278 import traceback; traceback.print_exc()
1278 import traceback; traceback.print_exc()
1279 else:
1279 else:
1280 # The normal production version is here
1280 # The normal production version is here
1281
1281
1282 # This method computes the self.matches array
1282 # This method computes the self.matches array
1283 self.complete(text, line_buffer, cursor_pos)
1283 self.complete(text, line_buffer, cursor_pos)
1284
1284
1285 try:
1285 try:
1286 return self.matches[state]
1286 return self.matches[state]
1287 except IndexError:
1287 except IndexError:
1288 return None
1288 return None
1289
1289
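For context on how rlcomplete() is consumed: on readline-capable terminal shells of this era the wiring looks roughly like the sketch below, although the shell normally does this itself during readline setup rather than leaving it to user code.

import readline
from IPython import get_ipython

ip = get_ipython()
# has_readline is only set on shells that actually initialised readline.
if ip is not None and getattr(ip, 'has_readline', False):
    readline.set_completer(ip.Completer.rlcomplete)
    readline.parse_and_bind('tab: complete')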
@@ -1,796 +1,796 b''
1 # encoding: utf-8
1 # encoding: utf-8
2 """Tests for the IPython tab-completion machinery."""
2 """Tests for the IPython tab-completion machinery."""
3
3
4 # Copyright (c) IPython Development Team.
4 # Copyright (c) IPython Development Team.
5 # Distributed under the terms of the Modified BSD License.
5 # Distributed under the terms of the Modified BSD License.
6
6
7 import os
7 import os
8 import sys
8 import sys
9 import unittest
9 import unittest
10
10
11 from contextlib import contextmanager
11 from contextlib import contextmanager
12
12
13 import nose.tools as nt
13 import nose.tools as nt
14
14
15 from traitlets.config.loader import Config
15 from traitlets.config.loader import Config
16 from IPython import get_ipython
16 from IPython import get_ipython
17 from IPython.core import completer
17 from IPython.core import completer
18 from IPython.external.decorators import knownfailureif
18 from IPython.external.decorators import knownfailureif
19 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
19 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
20 from IPython.utils.generics import complete_object
20 from IPython.utils.generics import complete_object
21 from IPython.utils.py3compat import string_types, unicode_type
21 from IPython.utils.py3compat import string_types, unicode_type
22 from IPython.testing import decorators as dec
22 from IPython.testing import decorators as dec
23
23
24 #-----------------------------------------------------------------------------
24 #-----------------------------------------------------------------------------
25 # Test functions
25 # Test functions
26 #-----------------------------------------------------------------------------
26 #-----------------------------------------------------------------------------
27
27
28 @contextmanager
28 @contextmanager
29 def greedy_completion():
29 def greedy_completion():
30 ip = get_ipython()
30 ip = get_ipython()
31 greedy_original = ip.Completer.greedy
31 greedy_original = ip.Completer.greedy
32 try:
32 try:
33 ip.Completer.greedy = True
33 ip.Completer.greedy = True
34 yield
34 yield
35 finally:
35 finally:
36 ip.Completer.greedy = greedy_original
36 ip.Completer.greedy = greedy_original
37
37
38 def test_protect_filename():
38 def test_protect_filename():
39 pairs = [ ('abc','abc'),
39 pairs = [ ('abc','abc'),
40 (' abc',r'\ abc'),
40 (' abc',r'\ abc'),
41 ('a bc',r'a\ bc'),
41 ('a bc',r'a\ bc'),
42 ('a bc',r'a\ \ bc'),
42 ('a bc',r'a\ \ bc'),
43 (' bc',r'\ \ bc'),
43 (' bc',r'\ \ bc'),
44 ]
44 ]
45 # On posix, we also protect parens and other special characters
45 # On posix, we also protect parens and other special characters
46 if sys.platform != 'win32':
46 if sys.platform != 'win32':
47 pairs.extend( [('a(bc',r'a\(bc'),
47 pairs.extend( [('a(bc',r'a\(bc'),
48 ('a)bc',r'a\)bc'),
48 ('a)bc',r'a\)bc'),
49 ('a( )bc',r'a\(\ \)bc'),
49 ('a( )bc',r'a\(\ \)bc'),
50 ('a[1]bc', r'a\[1\]bc'),
50 ('a[1]bc', r'a\[1\]bc'),
51 ('a{1}bc', r'a\{1\}bc'),
51 ('a{1}bc', r'a\{1\}bc'),
52 ('a#bc', r'a\#bc'),
52 ('a#bc', r'a\#bc'),
53 ('a?bc', r'a\?bc'),
53 ('a?bc', r'a\?bc'),
54 ('a=bc', r'a\=bc'),
54 ('a=bc', r'a\=bc'),
55 ('a\\bc', r'a\\bc'),
55 ('a\\bc', r'a\\bc'),
56 ('a|bc', r'a\|bc'),
56 ('a|bc', r'a\|bc'),
57 ('a;bc', r'a\;bc'),
57 ('a;bc', r'a\;bc'),
58 ('a:bc', r'a\:bc'),
58 ('a:bc', r'a\:bc'),
59 ("a'bc", r"a\'bc"),
59 ("a'bc", r"a\'bc"),
60 ('a*bc', r'a\*bc'),
60 ('a*bc', r'a\*bc'),
61 ('a"bc', r'a\"bc'),
61 ('a"bc', r'a\"bc'),
62 ('a^bc', r'a\^bc'),
62 ('a^bc', r'a\^bc'),
63 ('a&bc', r'a\&bc'),
63 ('a&bc', r'a\&bc'),
64 ] )
64 ] )
65 # run the actual tests
65 # run the actual tests
66 for s1, s2 in pairs:
66 for s1, s2 in pairs:
67 s1p = completer.protect_filename(s1)
67 s1p = completer.protect_filename(s1)
68 nt.assert_equal(s1p, s2)
68 nt.assert_equal(s1p, s2)
69
69
70
70
71 def check_line_split(splitter, test_specs):
71 def check_line_split(splitter, test_specs):
72 for part1, part2, split in test_specs:
72 for part1, part2, split in test_specs:
73 cursor_pos = len(part1)
73 cursor_pos = len(part1)
74 line = part1+part2
74 line = part1+part2
75 out = splitter.split_line(line, cursor_pos)
75 out = splitter.split_line(line, cursor_pos)
76 nt.assert_equal(out, split)
76 nt.assert_equal(out, split)
77
77
78
78
79 def test_line_split():
79 def test_line_split():
80 """Basic line splitter test with default specs."""
80 """Basic line splitter test with default specs."""
81 sp = completer.CompletionSplitter()
81 sp = completer.CompletionSplitter()
82 # The format of the test specs is: part1, part2, expected answer. Parts 1
82 # The format of the test specs is: part1, part2, expected answer. Parts 1
83 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
83 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
84 # was at the end of part1. So an empty part2 represents someone hitting
84 # was at the end of part1. So an empty part2 represents someone hitting
85 # tab at the end of the line, the most common case.
85 # tab at the end of the line, the most common case.
86 t = [('run some/scrip', '', 'some/scrip'),
86 t = [('run some/scrip', '', 'some/scrip'),
87 ('run scripts/er', 'ror.py foo', 'scripts/er'),
87 ('run scripts/er', 'ror.py foo', 'scripts/er'),
88 ('echo $HOM', '', 'HOM'),
88 ('echo $HOM', '', 'HOM'),
89 ('print sys.pa', '', 'sys.pa'),
89 ('print sys.pa', '', 'sys.pa'),
90 ('print(sys.pa', '', 'sys.pa'),
90 ('print(sys.pa', '', 'sys.pa'),
91 ("execfile('scripts/er", '', 'scripts/er'),
91 ("execfile('scripts/er", '', 'scripts/er'),
92 ('a[x.', '', 'x.'),
92 ('a[x.', '', 'x.'),
93 ('a[x.', 'y', 'x.'),
93 ('a[x.', 'y', 'x.'),
94 ('cd "some_file/', '', 'some_file/'),
94 ('cd "some_file/', '', 'some_file/'),
95 ]
95 ]
96 check_line_split(sp, t)
96 check_line_split(sp, t)
97 # Ensure splitting works OK with unicode by re-running the tests with
97 # Ensure splitting works OK with unicode by re-running the tests with
98 # all inputs turned into unicode
98 # all inputs turned into unicode
99 check_line_split(sp, [ map(unicode_type, p) for p in t] )
99 check_line_split(sp, [ map(unicode_type, p) for p in t] )
100
100
101
101
102 def test_custom_completion_error():
102 def test_custom_completion_error():
103 """Test that errors from custom attribute completers are silenced."""
103 """Test that errors from custom attribute completers are silenced."""
104 ip = get_ipython()
104 ip = get_ipython()
105 class A(object): pass
105 class A(object): pass
106 ip.user_ns['a'] = A()
106 ip.user_ns['a'] = A()
107
107
108 @complete_object.when_type(A)
108 @complete_object.when_type(A)
109 def complete_A(a, existing_completions):
109 def complete_A(a, existing_completions):
110 raise TypeError("this should be silenced")
110 raise TypeError("this should be silenced")
111
111
112 ip.complete("a.")
112 ip.complete("a.")
113
113
114
114
115 def test_unicode_completions():
115 def test_unicode_completions():
116 ip = get_ipython()
116 ip = get_ipython()
117 # Some strings that trigger different types of completion. Check them both
117 # Some strings that trigger different types of completion. Check them both
118 # in str and unicode forms
118 # in str and unicode forms
119 s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
119 s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
120 for t in s + list(map(unicode_type, s)):
120 for t in s + list(map(unicode_type, s)):
121 # We don't need to check exact completion values (they may change
121 # We don't need to check exact completion values (they may change
122 # depending on the state of the namespace), but at least no exceptions
122 # depending on the state of the namespace), but at least no exceptions
123 # should be thrown and the return value should be a pair of text, list
123 # should be thrown and the return value should be a pair of text, list
124 # values.
124 # values.
125 text, matches = ip.complete(t)
125 text, matches = ip.complete(t)
126 nt.assert_true(isinstance(text, string_types))
126 nt.assert_true(isinstance(text, string_types))
127 nt.assert_true(isinstance(matches, list))
127 nt.assert_true(isinstance(matches, list))
128
128
129 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
129 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
130 def test_latex_completions():
130 def test_latex_completions():
131 from IPython.core.latex_symbols import latex_symbols
131 from IPython.core.latex_symbols import latex_symbols
132 import random
132 import random
133 ip = get_ipython()
133 ip = get_ipython()
134 # Test some random unicode symbols
134 # Test some random unicode symbols
135 keys = random.sample(latex_symbols.keys(), 10)
135 keys = random.sample(latex_symbols.keys(), 10)
136 for k in keys:
136 for k in keys:
137 text, matches = ip.complete(k)
137 text, matches = ip.complete(k)
138 nt.assert_equal(len(matches),1)
138 nt.assert_equal(len(matches),1)
139 nt.assert_equal(text, k)
139 nt.assert_equal(text, k)
140 nt.assert_equal(matches[0], latex_symbols[k])
140 nt.assert_equal(matches[0], latex_symbols[k])
141 # Test a more complex line
141 # Test a more complex line
142 text, matches = ip.complete(u'print(\\alpha')
142 text, matches = ip.complete(u'print(\\alpha')
143 nt.assert_equals(text, u'\\alpha')
143 nt.assert_equals(text, u'\\alpha')
144 nt.assert_equals(matches[0], latex_symbols['\\alpha'])
144 nt.assert_equals(matches[0], latex_symbols['\\alpha'])
145 # Test multiple matching latex symbols
145 # Test multiple matching latex symbols
146 text, matches = ip.complete(u'\\al')
146 text, matches = ip.complete(u'\\al')
147 nt.assert_in('\\alpha', matches)
147 nt.assert_in('\\alpha', matches)
148 nt.assert_in('\\aleph', matches)
148 nt.assert_in('\\aleph', matches)
149
149
150
150
151
151
152
152
153 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
153 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
154 def test_back_latex_completion():
154 def test_back_latex_completion():
155 ip = get_ipython()
155 ip = get_ipython()
156
156
157 # do not return more than 1 match for \beta, only the latex one.
157 # do not return more than 1 match for \beta, only the latex one.
158 name, matches = ip.complete('\\β')
158 name, matches = ip.complete('\\β')
159 nt.assert_equal(len(matches), 1)
159 nt.assert_equal(len(matches), 1)
160 nt.assert_equal(matches[0], '\\beta')
160 nt.assert_equal(matches[0], '\\beta')
161
161
162 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
162 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
163 def test_back_unicode_completion():
163 def test_back_unicode_completion():
164 ip = get_ipython()
164 ip = get_ipython()
165
165
166 name, matches = ip.complete('\\Ⅴ')
166 name, matches = ip.complete('\\Ⅴ')
167 nt.assert_equal(len(matches), 1)
167 nt.assert_equal(len(matches), 1)
168 nt.assert_equal(matches[0], '\\ROMAN NUMERAL FIVE')
168 nt.assert_equal(matches[0], '\\ROMAN NUMERAL FIVE')
169
169
170
170
171 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
171 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
172 def test_forward_unicode_completion():
172 def test_forward_unicode_completion():
173 ip = get_ipython()
173 ip = get_ipython()
174
174
175 name, matches = ip.complete('\\ROMAN NUMERAL FIVE')
175 name, matches = ip.complete('\\ROMAN NUMERAL FIVE')
176 nt.assert_equal(len(matches), 1)
176 nt.assert_equal(len(matches), 1)
177 nt.assert_equal(matches[0], 'Ⅴ')
177 nt.assert_equal(matches[0], 'Ⅴ')
178
178
179 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
179 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
180 def test_no_ascii_back_completion():
180 def test_no_ascii_back_completion():
181 ip = get_ipython()
181 ip = get_ipython()
182 with TemporaryWorkingDirectory(): # Avoid any filename completions
182 with TemporaryWorkingDirectory(): # Avoid any filename completions
183 # single ASCII letters that don't yet have completions
183 # single ASCII letters that don't yet have completions
184 for letter in 'fjqyJMQVWY' :
184 for letter in 'fjqyJMQVWY' :
185 name, matches = ip.complete('\\'+letter)
185 name, matches = ip.complete('\\'+letter)
186 nt.assert_equal(matches, [])
186 nt.assert_equal(matches, [])
187
187
188
188
189
189
190
190
191 class CompletionSplitterTestCase(unittest.TestCase):
191 class CompletionSplitterTestCase(unittest.TestCase):
192 def setUp(self):
192 def setUp(self):
193 self.sp = completer.CompletionSplitter()
193 self.sp = completer.CompletionSplitter()
194
194
195 def test_delim_setting(self):
195 def test_delim_setting(self):
196 self.sp.delims = ' '
196 self.sp.delims = ' '
197 nt.assert_equal(self.sp.delims, ' ')
197 nt.assert_equal(self.sp.delims, ' ')
198 nt.assert_equal(self.sp._delim_expr, '[\ ]')
198 nt.assert_equal(self.sp._delim_expr, '[\ ]')
199
199
200 def test_spaces(self):
200 def test_spaces(self):
201 """Test with only spaces as split chars."""
201 """Test with only spaces as split chars."""
202 self.sp.delims = ' '
202 self.sp.delims = ' '
203 t = [('foo', '', 'foo'),
203 t = [('foo', '', 'foo'),
204 ('run foo', '', 'foo'),
204 ('run foo', '', 'foo'),
205 ('run foo', 'bar', 'foo'),
205 ('run foo', 'bar', 'foo'),
206 ]
206 ]
207 check_line_split(self.sp, t)
207 check_line_split(self.sp, t)
208
208
209
209
210 def test_has_open_quotes1():
210 def test_has_open_quotes1():
211 for s in ["'", "'''", "'hi' '"]:
211 for s in ["'", "'''", "'hi' '"]:
212 nt.assert_equal(completer.has_open_quotes(s), "'")
212 nt.assert_equal(completer.has_open_quotes(s), "'")
213
213
214
214
215 def test_has_open_quotes2():
215 def test_has_open_quotes2():
216 for s in ['"', '"""', '"hi" "']:
216 for s in ['"', '"""', '"hi" "']:
217 nt.assert_equal(completer.has_open_quotes(s), '"')
217 nt.assert_equal(completer.has_open_quotes(s), '"')
218
218
219
219
220 def test_has_open_quotes3():
220 def test_has_open_quotes3():
221 for s in ["''", "''' '''", "'hi' 'ipython'"]:
221 for s in ["''", "''' '''", "'hi' 'ipython'"]:
222 nt.assert_false(completer.has_open_quotes(s))
222 nt.assert_false(completer.has_open_quotes(s))
223
223
224
224
225 def test_has_open_quotes4():
225 def test_has_open_quotes4():
226 for s in ['""', '""" """', '"hi" "ipython"']:
226 for s in ['""', '""" """', '"hi" "ipython"']:
227 nt.assert_false(completer.has_open_quotes(s))
227 nt.assert_false(completer.has_open_quotes(s))
228
228
229
229
230 @knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
230 @knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
231 def test_abspath_file_completions():
231 def test_abspath_file_completions():
232 ip = get_ipython()
232 ip = get_ipython()
233 with TemporaryDirectory() as tmpdir:
233 with TemporaryDirectory() as tmpdir:
234 prefix = os.path.join(tmpdir, 'foo')
234 prefix = os.path.join(tmpdir, 'foo')
235 suffixes = ['1', '2']
235 suffixes = ['1', '2']
236 names = [prefix+s for s in suffixes]
236 names = [prefix+s for s in suffixes]
237 for n in names:
237 for n in names:
238 open(n, 'w').close()
238 open(n, 'w').close()
239
239
240 # Check simple completion
240 # Check simple completion
241 c = ip.complete(prefix)[1]
241 c = ip.complete(prefix)[1]
242 nt.assert_equal(c, names)
242 nt.assert_equal(c, names)
243
243
244 # Now check with a function call
244 # Now check with a function call
245 cmd = 'a = f("%s' % prefix
245 cmd = 'a = f("%s' % prefix
246 c = ip.complete(prefix, cmd)[1]
246 c = ip.complete(prefix, cmd)[1]
247 comp = [prefix+s for s in suffixes]
247 comp = [prefix+s for s in suffixes]
248 nt.assert_equal(c, comp)
248 nt.assert_equal(c, comp)
249
249
250
250
251 def test_local_file_completions():
251 def test_local_file_completions():
252 ip = get_ipython()
252 ip = get_ipython()
253 with TemporaryWorkingDirectory():
253 with TemporaryWorkingDirectory():
254 prefix = './foo'
254 prefix = './foo'
255 suffixes = ['1', '2']
255 suffixes = ['1', '2']
256 names = [prefix+s for s in suffixes]
256 names = [prefix+s for s in suffixes]
257 for n in names:
257 for n in names:
258 open(n, 'w').close()
258 open(n, 'w').close()
259
259
260 # Check simple completion
260 # Check simple completion
261 c = ip.complete(prefix)[1]
261 c = ip.complete(prefix)[1]
262 nt.assert_equal(c, names)
262 nt.assert_equal(c, names)
263
263
264 # Now check with a function call
264 # Now check with a function call
265 cmd = 'a = f("%s' % prefix
265 cmd = 'a = f("%s' % prefix
266 c = ip.complete(prefix, cmd)[1]
266 c = ip.complete(prefix, cmd)[1]
267 comp = [prefix+s for s in suffixes]
267 comp = [prefix+s for s in suffixes]
268 nt.assert_equal(c, comp)
268 nt.assert_equal(c, comp)
269
269
270
270
271 def test_greedy_completions():
271 def test_greedy_completions():
272 ip = get_ipython()
272 ip = get_ipython()
273 ip.ex('a=list(range(5))')
273 ip.ex('a=list(range(5))')
274 _,c = ip.complete('.',line='a[0].')
274 _,c = ip.complete('.',line='a[0].')
275 nt.assert_false('a[0].real' in c,
275 nt.assert_false('a[0].real' in c,
276 "Shouldn't have completed on a[0]: %s"%c)
276 "Shouldn't have completed on a[0]: %s"%c)
277 with greedy_completion():
277 with greedy_completion():
278 _,c = ip.complete('.',line='a[0].')
278 _,c = ip.complete('.',line='a[0].')
279 nt.assert_true('a[0].real' in c, "Should have completed on a[0]: %s"%c)
279 nt.assert_true('a[0].real' in c, "Should have completed on a[0]: %s"%c)
280
280
281
281
282 def test_omit__names():
282 def test_omit__names():
283 # also happens to test IPCompleter as a configurable
283 # also happens to test IPCompleter as a configurable
284 ip = get_ipython()
284 ip = get_ipython()
285 ip._hidden_attr = 1
285 ip._hidden_attr = 1
286 ip._x = {}
286 ip._x = {}
287 c = ip.Completer
287 c = ip.Completer
288 ip.ex('ip=get_ipython()')
288 ip.ex('ip=get_ipython()')
289 cfg = Config()
289 cfg = Config()
290 cfg.IPCompleter.omit__names = 0
290 cfg.IPCompleter.omit__names = 0
291 c.update_config(cfg)
291 c.update_config(cfg)
292 s,matches = c.complete('ip.')
292 s,matches = c.complete('ip.')
293 nt.assert_in('ip.__str__', matches)
293 nt.assert_in('ip.__str__', matches)
294 nt.assert_in('ip._hidden_attr', matches)
294 nt.assert_in('ip._hidden_attr', matches)
295 cfg = Config()
295 cfg = Config()
296 cfg.IPCompleter.omit__names = 1
296 cfg.IPCompleter.omit__names = 1
297 c.update_config(cfg)
297 c.update_config(cfg)
298 s,matches = c.complete('ip.')
298 s,matches = c.complete('ip.')
299 nt.assert_not_in('ip.__str__', matches)
299 nt.assert_not_in('ip.__str__', matches)
300 nt.assert_in('ip._hidden_attr', matches)
300 nt.assert_in('ip._hidden_attr', matches)
301 cfg = Config()
301 cfg = Config()
302 cfg.IPCompleter.omit__names = 2
302 cfg.IPCompleter.omit__names = 2
303 c.update_config(cfg)
303 c.update_config(cfg)
304 s,matches = c.complete('ip.')
304 s,matches = c.complete('ip.')
305 nt.assert_not_in('ip.__str__', matches)
305 nt.assert_not_in('ip.__str__', matches)
306 nt.assert_not_in('ip._hidden_attr', matches)
306 nt.assert_not_in('ip._hidden_attr', matches)
307 s,matches = c.complete('ip._x.')
307 s,matches = c.complete('ip._x.')
308 nt.assert_in('ip._x.keys', matches)
308 nt.assert_in('ip._x.keys', matches)
309 del ip._hidden_attr
309 del ip._hidden_attr
310
310
311
311
312 def test_limit_to__all__False_ok():
312 def test_limit_to__all__False_ok():
313 ip = get_ipython()
313 ip = get_ipython()
314 c = ip.Completer
314 c = ip.Completer
315 ip.ex('class D: x=24')
315 ip.ex('class D: x=24')
316 ip.ex('d=D()')
316 ip.ex('d=D()')
317 cfg = Config()
317 cfg = Config()
318 cfg.IPCompleter.limit_to__all__ = False
318 cfg.IPCompleter.limit_to__all__ = False
319 c.update_config(cfg)
319 c.update_config(cfg)
320 s, matches = c.complete('d.')
320 s, matches = c.complete('d.')
321 nt.assert_in('d.x', matches)
321 nt.assert_in('d.x', matches)
322
322
323
323
324 def test_limit_to__all__True_ok():
324 def test_limit_to__all__True_ok():
325 ip = get_ipython()
325 ip = get_ipython()
326 c = ip.Completer
326 c = ip.Completer
327 ip.ex('class D: x=24')
327 ip.ex('class D: x=24')
328 ip.ex('d=D()')
328 ip.ex('d=D()')
329 ip.ex("d.__all__=['z']")
329 ip.ex("d.__all__=['z']")
330 cfg = Config()
330 cfg = Config()
331 cfg.IPCompleter.limit_to__all__ = True
331 cfg.IPCompleter.limit_to__all__ = True
332 c.update_config(cfg)
332 c.update_config(cfg)
333 s, matches = c.complete('d.')
333 s, matches = c.complete('d.')
334 nt.assert_in('d.z', matches)
334 nt.assert_in('d.z', matches)
335 nt.assert_not_in('d.x', matches)
335 nt.assert_not_in('d.x', matches)
336
336
337
337
338 def test_get__all__entries_ok():
338 def test_get__all__entries_ok():
339 class A(object):
339 class A(object):
340 __all__ = ['x', 1]
340 __all__ = ['x', 1]
341 words = completer.get__all__entries(A())
341 words = completer.get__all__entries(A())
342 nt.assert_equal(words, ['x'])
342 nt.assert_equal(words, ['x'])
343
343
344
344
345 def test_get__all__entries_no__all__ok():
345 def test_get__all__entries_no__all__ok():
346 class A(object):
346 class A(object):
347 pass
347 pass
348 words = completer.get__all__entries(A())
348 words = completer.get__all__entries(A())
349 nt.assert_equal(words, [])
349 nt.assert_equal(words, [])
350
350
351
351
352 def test_func_kw_completions():
352 def test_func_kw_completions():
353 ip = get_ipython()
353 ip = get_ipython()
354 c = ip.Completer
354 c = ip.Completer
355 ip.ex('def myfunc(a=1,b=2): return a+b')
355 ip.ex('def myfunc(a=1,b=2): return a+b')
356 s, matches = c.complete(None, 'myfunc(1,b')
356 s, matches = c.complete(None, 'myfunc(1,b')
357 nt.assert_in('b=', matches)
357 nt.assert_in('b=', matches)
358 # Simulate completing with cursor right after b (pos==10):
358 # Simulate completing with cursor right after b (pos==10):
359 s, matches = c.complete(None, 'myfunc(1,b)', 10)
359 s, matches = c.complete(None, 'myfunc(1,b)', 10)
360 nt.assert_in('b=', matches)
360 nt.assert_in('b=', matches)
361 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
361 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
362 nt.assert_in('b=', matches)
362 nt.assert_in('b=', matches)
363 #builtin function
363 #builtin function
364 s, matches = c.complete(None, 'min(k, k')
364 s, matches = c.complete(None, 'min(k, k')
365 nt.assert_in('key=', matches)
365 nt.assert_in('key=', matches)
366
366
367
367
368 def test_default_arguments_from_docstring():
368 def test_default_arguments_from_docstring():
369 doc = min.__doc__
369 doc = min.__doc__
370 ip = get_ipython()
370 ip = get_ipython()
371 c = ip.Completer
371 c = ip.Completer
372 kwd = c._default_arguments_from_docstring(
372 kwd = c._default_arguments_from_docstring(
373 'min(iterable[, key=func]) -> value')
373 'min(iterable[, key=func]) -> value')
374 nt.assert_equal(kwd, ['key'])
374 nt.assert_equal(kwd, ['key'])
375 #with cython type etc
375 #with cython type etc
376 kwd = c._default_arguments_from_docstring(
376 kwd = c._default_arguments_from_docstring(
377 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
377 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
378 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
378 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
379 #white spaces
379 #white spaces
380 kwd = c._default_arguments_from_docstring(
380 kwd = c._default_arguments_from_docstring(
381 '\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
381 '\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
382 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
382 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
383
383
384 def test_line_magics():
384 def test_line_magics():
385 ip = get_ipython()
385 ip = get_ipython()
386 c = ip.Completer
386 c = ip.Completer
387 s, matches = c.complete(None, 'lsmag')
387 s, matches = c.complete(None, 'lsmag')
388 nt.assert_in('%lsmagic', matches)
388 nt.assert_in('%lsmagic', matches)
389 s, matches = c.complete(None, '%lsmag')
389 s, matches = c.complete(None, '%lsmag')
390 nt.assert_in('%lsmagic', matches)
390 nt.assert_in('%lsmagic', matches)
391
391
392
392
393 def test_cell_magics():
393 def test_cell_magics():
394 from IPython.core.magic import register_cell_magic
394 from IPython.core.magic import register_cell_magic
395
395
396 @register_cell_magic
396 @register_cell_magic
397 def _foo_cellm(line, cell):
397 def _foo_cellm(line, cell):
398 pass
398 pass
399
399
400 ip = get_ipython()
400 ip = get_ipython()
401 c = ip.Completer
401 c = ip.Completer
402
402
403 s, matches = c.complete(None, '_foo_ce')
403 s, matches = c.complete(None, '_foo_ce')
404 nt.assert_in('%%_foo_cellm', matches)
404 nt.assert_in('%%_foo_cellm', matches)
405 s, matches = c.complete(None, '%%_foo_ce')
405 s, matches = c.complete(None, '%%_foo_ce')
406 nt.assert_in('%%_foo_cellm', matches)
406 nt.assert_in('%%_foo_cellm', matches)
407
407
408
408
409 def test_line_cell_magics():
409 def test_line_cell_magics():
410 from IPython.core.magic import register_line_cell_magic
410 from IPython.core.magic import register_line_cell_magic
411
411
412 @register_line_cell_magic
412 @register_line_cell_magic
413 def _bar_cellm(line, cell):
413 def _bar_cellm(line, cell):
414 pass
414 pass
415
415
416 ip = get_ipython()
416 ip = get_ipython()
417 c = ip.Completer
417 c = ip.Completer
418
418
419 # The policy here is trickier, see comments in completion code. The
419 # The policy here is trickier, see comments in completion code. The
420 # returned values depend on whether the user passes %% or not explicitly,
420 # returned values depend on whether the user passes %% or not explicitly,
421 # and this will show a difference if the same name is both a line and cell
421 # and this will show a difference if the same name is both a line and cell
422 # magic.
422 # magic.
423 s, matches = c.complete(None, '_bar_ce')
423 s, matches = c.complete(None, '_bar_ce')
424 nt.assert_in('%_bar_cellm', matches)
424 nt.assert_in('%_bar_cellm', matches)
425 nt.assert_in('%%_bar_cellm', matches)
425 nt.assert_in('%%_bar_cellm', matches)
426 s, matches = c.complete(None, '%_bar_ce')
426 s, matches = c.complete(None, '%_bar_ce')
427 nt.assert_in('%_bar_cellm', matches)
427 nt.assert_in('%_bar_cellm', matches)
428 nt.assert_in('%%_bar_cellm', matches)
428 nt.assert_in('%%_bar_cellm', matches)
429 s, matches = c.complete(None, '%%_bar_ce')
429 s, matches = c.complete(None, '%%_bar_ce')
430 nt.assert_not_in('%_bar_cellm', matches)
430 nt.assert_not_in('%_bar_cellm', matches)
431 nt.assert_in('%%_bar_cellm', matches)
431 nt.assert_in('%%_bar_cellm', matches)
432
432
433
433
434 def test_magic_completion_order():
434 def test_magic_completion_order():
435
435
436 ip = get_ipython()
436 ip = get_ipython()
437 c = ip.Completer
437 c = ip.Completer
438
438
439 # Test ordering of magics and non-magics with the same name
439 # Test ordering of magics and non-magics with the same name
440 # We want the non-magic first
440 # We want the non-magic first
441
441
442 # Before the name 'matplotlib' exists in the user namespace, there should only be one option:
442 # Before the name 'matplotlib' exists in the user namespace, there should only be one option:
443
443
444 text, matches = c.complete('mat')
444 text, matches = c.complete('mat')
445 nt.assert_equal(matches, ["%matplotlib"])
445 nt.assert_equal(matches, ["%matplotlib"])
446
446
447
447
448 ip.run_cell("matplotlib = 1") # introduce name into namespace
448 ip.run_cell("matplotlib = 1") # introduce name into namespace
449
449
450 # After the name is defined, there should be two options, ordered like this:
450 # After the name is defined, there should be two options, ordered like this:
451 text, matches = c.complete('mat')
451 text, matches = c.complete('mat')
452 nt.assert_equal(matches, ["matplotlib", "%matplotlib"])
452 nt.assert_equal(matches, ["matplotlib", "%matplotlib"])
453
453
454
454
455 ip.run_cell("timeit = 1") # define a user variable called 'timeit'
455 ip.run_cell("timeit = 1") # define a user variable called 'timeit'
456
456
457 # Order of user variable and line and cell magics with same name:
457 # Order of user variable and line and cell magics with same name:
458 text, matches = c.complete('timeit')
458 text, matches = c.complete('timeit')
459 nt.assert_equal(matches, ["timeit", "%timeit","%%timeit"])
459 nt.assert_equal(matches, ["timeit", "%timeit","%%timeit"])
460
460
461
461
462 def test_dict_key_completion_string():
462 def test_dict_key_completion_string():
463 """Test dictionary key completion for string keys"""
463 """Test dictionary key completion for string keys"""
464 ip = get_ipython()
464 ip = get_ipython()
465 complete = ip.Completer.complete
465 complete = ip.Completer.complete
466
466
467 ip.user_ns['d'] = {'abc': None}
467 ip.user_ns['d'] = {'abc': None}
468
468
469 # check completion at different stages
469 # check completion at different stages
470 _, matches = complete(line_buffer="d[")
470 _, matches = complete(line_buffer="d[")
471 nt.assert_in("'abc'", matches)
471 nt.assert_in("'abc'", matches)
472 nt.assert_not_in("'abc']", matches)
472 nt.assert_not_in("'abc']", matches)
473
473
474 _, matches = complete(line_buffer="d['")
474 _, matches = complete(line_buffer="d['")
475 nt.assert_in("abc", matches)
475 nt.assert_in("abc", matches)
476 nt.assert_not_in("abc']", matches)
476 nt.assert_not_in("abc']", matches)
477
477
478 _, matches = complete(line_buffer="d['a")
478 _, matches = complete(line_buffer="d['a")
479 nt.assert_in("abc", matches)
479 nt.assert_in("abc", matches)
480 nt.assert_not_in("abc']", matches)
480 nt.assert_not_in("abc']", matches)
481
481
482 # check use of different quoting
482 # check use of different quoting
483 _, matches = complete(line_buffer="d[\"")
483 _, matches = complete(line_buffer="d[\"")
484 nt.assert_in("abc", matches)
484 nt.assert_in("abc", matches)
485 nt.assert_not_in('abc\"]', matches)
485 nt.assert_not_in('abc\"]', matches)
486
486
487 _, matches = complete(line_buffer="d[\"a")
487 _, matches = complete(line_buffer="d[\"a")
488 nt.assert_in("abc", matches)
488 nt.assert_in("abc", matches)
489 nt.assert_not_in('abc\"]', matches)
489 nt.assert_not_in('abc\"]', matches)
490
490
491 # check sensitivity to following context
491 # check sensitivity to following context
492 _, matches = complete(line_buffer="d[]", cursor_pos=2)
492 _, matches = complete(line_buffer="d[]", cursor_pos=2)
493 nt.assert_in("'abc'", matches)
493 nt.assert_in("'abc'", matches)
494
494
495 _, matches = complete(line_buffer="d['']", cursor_pos=3)
495 _, matches = complete(line_buffer="d['']", cursor_pos=3)
496 nt.assert_in("abc", matches)
496 nt.assert_in("abc", matches)
497 nt.assert_not_in("abc'", matches)
497 nt.assert_not_in("abc'", matches)
498 nt.assert_not_in("abc']", matches)
498 nt.assert_not_in("abc']", matches)
499
499
500 # check that multiple matching keys are returned, and that non-matching keys are not
500 # check that multiple matching keys are returned, and that non-matching keys are not
501 ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,
501 ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,
502 5: None}
502 5: None}
503
503
504 _, matches = complete(line_buffer="d['a")
504 _, matches = complete(line_buffer="d['a")
505 nt.assert_in("abc", matches)
505 nt.assert_in("abc", matches)
506 nt.assert_in("abd", matches)
506 nt.assert_in("abd", matches)
507 nt.assert_not_in("bad", matches)
507 nt.assert_not_in("bad", matches)
508 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
508 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
509
509
510 # check escaping and whitespace
510 # check escaping and whitespace
511 ip.user_ns['d'] = {'a\nb': None, 'a\'b': None, 'a"b': None, 'a word': None}
511 ip.user_ns['d'] = {'a\nb': None, 'a\'b': None, 'a"b': None, 'a word': None}
512 _, matches = complete(line_buffer="d['a")
512 _, matches = complete(line_buffer="d['a")
513 nt.assert_in("a\\nb", matches)
513 nt.assert_in("a\\nb", matches)
514 nt.assert_in("a\\'b", matches)
514 nt.assert_in("a\\'b", matches)
515 nt.assert_in("a\"b", matches)
515 nt.assert_in("a\"b", matches)
516 nt.assert_in("a word", matches)
516 nt.assert_in("a word", matches)
517 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
517 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
518
518
519 # - can complete on non-initial word of the string
519 # - can complete on non-initial word of the string
520 _, matches = complete(line_buffer="d['a w")
520 _, matches = complete(line_buffer="d['a w")
521 nt.assert_in("word", matches)
521 nt.assert_in("word", matches)
522
522
523 # - understands quote escaping
523 # - understands quote escaping
524 _, matches = complete(line_buffer="d['a\\'")
524 _, matches = complete(line_buffer="d['a\\'")
525 nt.assert_in("b", matches)
525 nt.assert_in("b", matches)
526
526
527 # - default quoting should work like repr
527 # - default quoting should work like repr
528 _, matches = complete(line_buffer="d[")
528 _, matches = complete(line_buffer="d[")
529 nt.assert_in("\"a'b\"", matches)
529 nt.assert_in("\"a'b\"", matches)
530
530
531 # - when the key is opened with ", an unescaped apostrophe can still be matched
531 # - when the key is opened with ", an unescaped apostrophe can still be matched
532 _, matches = complete(line_buffer="d[\"a'")
532 _, matches = complete(line_buffer="d[\"a'")
533 nt.assert_in("b", matches)
533 nt.assert_in("b", matches)
534
534
535 # must not split on delimiters that readline itself does not split on
535 # must not split on delimiters that readline itself does not split on
536 if '-' not in ip.Completer.splitter.delims:
536 if '-' not in ip.Completer.splitter.delims:
537 ip.user_ns['d'] = {'before-after': None}
537 ip.user_ns['d'] = {'before-after': None}
538 _, matches = complete(line_buffer="d['before-af")
538 _, matches = complete(line_buffer="d['before-af")
539 nt.assert_in('before-after', matches)
539 nt.assert_in('before-after', matches)
540
540
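A brief sketch of the quoting rules these assertions pin down (same assumed get_ipython() helper; the expected values mirror the checks above):

    ip = get_ipython()
    complete = ip.Completer.complete
    ip.user_ns['d'] = {"a'b": None}

    # No quote opened yet: keys are offered fully quoted, repr-style.
    _, matches = complete(line_buffer="d[")     # expect "\"a'b\"" among the matches

    # A double quote is already open: the key body is offered with the apostrophe unescaped.
    _, matches = complete(line_buffer="d[\"a")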
541 def test_dict_key_completion_contexts():
541 def test_dict_key_completion_contexts():
542 """Test expression contexts in which dict key completion occurs"""
542 """Test expression contexts in which dict key completion occurs"""
543 ip = get_ipython()
543 ip = get_ipython()
544 complete = ip.Completer.complete
544 complete = ip.Completer.complete
545 d = {'abc': None}
545 d = {'abc': None}
546 ip.user_ns['d'] = d
546 ip.user_ns['d'] = d
547
547
548 class C:
548 class C:
549 data = d
549 data = d
550 ip.user_ns['C'] = C
550 ip.user_ns['C'] = C
551 ip.user_ns['get'] = lambda: d
551 ip.user_ns['get'] = lambda: d
552
552
553 def assert_no_completion(**kwargs):
553 def assert_no_completion(**kwargs):
554 _, matches = complete(**kwargs)
554 _, matches = complete(**kwargs)
555 nt.assert_not_in('abc', matches)
555 nt.assert_not_in('abc', matches)
556 nt.assert_not_in('abc\'', matches)
556 nt.assert_not_in('abc\'', matches)
557 nt.assert_not_in('abc\']', matches)
557 nt.assert_not_in('abc\']', matches)
558 nt.assert_not_in('\'abc\'', matches)
558 nt.assert_not_in('\'abc\'', matches)
559 nt.assert_not_in('\'abc\']', matches)
559 nt.assert_not_in('\'abc\']', matches)
560
560
561 def assert_completion(**kwargs):
561 def assert_completion(**kwargs):
562 _, matches = complete(**kwargs)
562 _, matches = complete(**kwargs)
563 nt.assert_in("'abc'", matches)
563 nt.assert_in("'abc'", matches)
564 nt.assert_not_in("'abc']", matches)
564 nt.assert_not_in("'abc']", matches)
565
565
566 # no completion after string closed, even if reopened
566 # no completion after string closed, even if reopened
567 assert_no_completion(line_buffer="d['a'")
567 assert_no_completion(line_buffer="d['a'")
568 assert_no_completion(line_buffer="d[\"a\"")
568 assert_no_completion(line_buffer="d[\"a\"")
569 assert_no_completion(line_buffer="d['a' + ")
569 assert_no_completion(line_buffer="d['a' + ")
570 assert_no_completion(line_buffer="d['a' + '")
570 assert_no_completion(line_buffer="d['a' + '")
571
571
572 # completion in non-trivial expressions
572 # completion in non-trivial expressions
573 assert_completion(line_buffer="+ d[")
573 assert_completion(line_buffer="+ d[")
574 assert_completion(line_buffer="(d[")
574 assert_completion(line_buffer="(d[")
575 assert_completion(line_buffer="C.data[")
575 assert_completion(line_buffer="C.data[")
576
576
577 # greedy flag
577 # greedy flag
578 def assert_completion(**kwargs):
578 def assert_completion(**kwargs):
579 _, matches = complete(**kwargs)
579 _, matches = complete(**kwargs)
580 nt.assert_in("get()['abc']", matches)
580 nt.assert_in("get()['abc']", matches)
581
581
582 assert_no_completion(line_buffer="get()[")
582 assert_no_completion(line_buffer="get()[")
583 with greedy_completion():
583 with greedy_completion():
584 assert_completion(line_buffer="get()[")
584 assert_completion(line_buffer="get()[")
585 assert_completion(line_buffer="get()['")
585 assert_completion(line_buffer="get()['")
586 assert_completion(line_buffer="get()['a")
586 assert_completion(line_buffer="get()['a")
587 assert_completion(line_buffer="get()['ab")
587 assert_completion(line_buffer="get()['ab")
588 assert_completion(line_buffer="get()['abc")
588 assert_completion(line_buffer="get()['abc")
589
589
590
590
591
591
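The greedy_completion() context manager used above is a test helper; outside the test suite the same switch is the IPCompleter.greedy configurable (setting the trait directly below is assumed equivalent to %config IPCompleter.greedy=True):

    ip = get_ipython()
    ip.user_ns['get'] = lambda: {'abc': None}

    ip.Completer.greedy = False
    _, matches = ip.Completer.complete(line_buffer="get()[")
    # Non-greedy: the call is not evaluated, so no key completions are offered.

    ip.Completer.greedy = True
    _, matches = ip.Completer.complete(line_buffer="get()[")
    # Greedy: the expression is evaluated (which runs arbitrary code), and
    # "get()['abc']" can appear among the matches.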
592 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
592 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
593 def test_dict_key_completion_bytes():
593 def test_dict_key_completion_bytes():
594 """Test handling of bytes in dict key completion"""
594 """Test handling of bytes in dict key completion"""
595 ip = get_ipython()
595 ip = get_ipython()
596 complete = ip.Completer.complete
596 complete = ip.Completer.complete
597
597
598 ip.user_ns['d'] = {'abc': None, b'abd': None}
598 ip.user_ns['d'] = {'abc': None, b'abd': None}
599
599
600 _, matches = complete(line_buffer="d[")
600 _, matches = complete(line_buffer="d[")
601 nt.assert_in("'abc'", matches)
601 nt.assert_in("'abc'", matches)
602 nt.assert_in("b'abd'", matches)
602 nt.assert_in("b'abd'", matches)
603
603
604 if False: # not currently implemented
604 if False: # not currently implemented
605 _, matches = complete(line_buffer="d[b")
605 _, matches = complete(line_buffer="d[b")
606 nt.assert_in("b'abd'", matches)
606 nt.assert_in("b'abd'", matches)
607 nt.assert_not_in("b'abc'", matches)
607 nt.assert_not_in("b'abc'", matches)
608
608
609 _, matches = complete(line_buffer="d[b'")
609 _, matches = complete(line_buffer="d[b'")
610 nt.assert_in("abd", matches)
610 nt.assert_in("abd", matches)
611 nt.assert_not_in("abc", matches)
611 nt.assert_not_in("abc", matches)
612
612
613 _, matches = complete(line_buffer="d[B'")
613 _, matches = complete(line_buffer="d[B'")
614 nt.assert_in("abd", matches)
614 nt.assert_in("abd", matches)
615 nt.assert_not_in("abc", matches)
615 nt.assert_not_in("abc", matches)
616
616
617 _, matches = complete(line_buffer="d['")
617 _, matches = complete(line_buffer="d['")
618 nt.assert_in("abc", matches)
618 nt.assert_in("abc", matches)
619 nt.assert_not_in("abd", matches)
619 nt.assert_not_in("abd", matches)
620
620
621
621
622 @dec.onlyif(sys.version_info[0] < 3, 'This test only applies in Py<3')
622 @dec.onlyif(sys.version_info[0] < 3, 'This test only applies in Py<3')
623 def test_dict_key_completion_unicode_py2():
623 def test_dict_key_completion_unicode_py2():
624 """Test handling of unicode in dict key completion"""
624 """Test handling of unicode in dict key completion"""
625 ip = get_ipython()
625 ip = get_ipython()
626 complete = ip.Completer.complete
626 complete = ip.Completer.complete
627
627
628 ip.user_ns['d'] = {u'abc': None,
628 ip.user_ns['d'] = {u'abc': None,
629 u'a\u05d0b': None}
629 u'a\u05d0b': None}
630
630
631 _, matches = complete(line_buffer="d[")
631 _, matches = complete(line_buffer="d[")
632 nt.assert_in("u'abc'", matches)
632 nt.assert_in("u'abc'", matches)
633 nt.assert_in("u'a\\u05d0b'", matches)
633 nt.assert_in("u'a\\u05d0b'", matches)
634
634
635 _, matches = complete(line_buffer="d['a")
635 _, matches = complete(line_buffer="d['a")
636 nt.assert_in("abc", matches)
636 nt.assert_in("abc", matches)
637 nt.assert_not_in("a\\u05d0b", matches)
637 nt.assert_not_in("a\\u05d0b", matches)
638
638
639 _, matches = complete(line_buffer="d[u'a")
639 _, matches = complete(line_buffer="d[u'a")
640 nt.assert_in("abc", matches)
640 nt.assert_in("abc", matches)
641 nt.assert_in("a\\u05d0b", matches)
641 nt.assert_in("a\\u05d0b", matches)
642
642
643 _, matches = complete(line_buffer="d[U'a")
643 _, matches = complete(line_buffer="d[U'a")
644 nt.assert_in("abc", matches)
644 nt.assert_in("abc", matches)
645 nt.assert_in("a\\u05d0b", matches)
645 nt.assert_in("a\\u05d0b", matches)
646
646
647 # query using escape
647 # query using escape
648 _, matches = complete(line_buffer=u"d[u'a\\u05d0")
648 _, matches = complete(line_buffer=u"d[u'a\\u05d0")
649 nt.assert_in("u05d0b", matches) # tokenized after \\
649 nt.assert_in("u05d0b", matches) # tokenized after \\
650
650
651 # query using character
651 # query using character
652 _, matches = complete(line_buffer=u"d[u'a\u05d0")
652 _, matches = complete(line_buffer=u"d[u'a\u05d0")
653 nt.assert_in(u"a\u05d0b", matches)
653 nt.assert_in(u"a\u05d0b", matches)
654
654
655 with greedy_completion():
655 with greedy_completion():
656 _, matches = complete(line_buffer="d[")
656 _, matches = complete(line_buffer="d[")
657 nt.assert_in("d[u'abc']", matches)
657 nt.assert_in("d[u'abc']", matches)
658 nt.assert_in("d[u'a\\u05d0b']", matches)
658 nt.assert_in("d[u'a\\u05d0b']", matches)
659
659
660 _, matches = complete(line_buffer="d['a")
660 _, matches = complete(line_buffer="d['a")
661 nt.assert_in("d['abc']", matches)
661 nt.assert_in("d['abc']", matches)
662 nt.assert_not_in("d[u'a\\u05d0b']", matches)
662 nt.assert_not_in("d[u'a\\u05d0b']", matches)
663
663
664 _, matches = complete(line_buffer="d[u'a")
664 _, matches = complete(line_buffer="d[u'a")
665 nt.assert_in("d[u'abc']", matches)
665 nt.assert_in("d[u'abc']", matches)
666 nt.assert_in("d[u'a\\u05d0b']", matches)
666 nt.assert_in("d[u'a\\u05d0b']", matches)
667
667
668 _, matches = complete(line_buffer="d[U'a")
668 _, matches = complete(line_buffer="d[U'a")
669 nt.assert_in("d[U'abc']", matches)
669 nt.assert_in("d[U'abc']", matches)
670 nt.assert_in("d[U'a\\u05d0b']", matches)
670 nt.assert_in("d[U'a\\u05d0b']", matches)
671
671
672 # query using escape
672 # query using escape
673 _, matches = complete(line_buffer=u"d[u'a\\u05d0")
673 _, matches = complete(line_buffer=u"d[u'a\\u05d0")
674 nt.assert_in("d[u'a\\u05d0b']", matches) # tokenized after \\
674 nt.assert_in("d[u'a\\u05d0b']", matches) # tokenized after \\
675
675
676 # query using character
676 # query using character
677 _, matches = complete(line_buffer=u"d[u'a\u05d0")
677 _, matches = complete(line_buffer=u"d[u'a\u05d0")
678 nt.assert_in(u"d[u'a\u05d0b']", matches)
678 nt.assert_in(u"d[u'a\u05d0b']", matches)
679
679
680
680
681 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
681 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
682 def test_dict_key_completion_unicode_py3():
682 def test_dict_key_completion_unicode_py3():
683 """Test handling of unicode in dict key completion"""
683 """Test handling of unicode in dict key completion"""
684 ip = get_ipython()
684 ip = get_ipython()
685 complete = ip.Completer.complete
685 complete = ip.Completer.complete
686
686
687 ip.user_ns['d'] = {u'a\u05d0': None}
687 ip.user_ns['d'] = {u'a\u05d0': None}
688
688
689 # query using escape
689 # query using escape
690 _, matches = complete(line_buffer="d['a\\u05d0")
690 _, matches = complete(line_buffer="d['a\\u05d0")
691 nt.assert_in("u05d0", matches) # tokenized after \\
691 nt.assert_in("u05d0", matches) # tokenized after \\
692
692
693 # query using character
693 # query using character
694 _, matches = complete(line_buffer="d['a\u05d0")
694 _, matches = complete(line_buffer="d['a\u05d0")
695 nt.assert_in(u"a\u05d0", matches)
695 nt.assert_in(u"a\u05d0", matches)
696
696
697 with greedy_completion():
697 with greedy_completion():
698 # query using escape
698 # query using escape
699 _, matches = complete(line_buffer="d['a\\u05d0")
699 _, matches = complete(line_buffer="d['a\\u05d0")
700 nt.assert_in("d['a\\u05d0']", matches) # tokenized after \\
700 nt.assert_in("d['a\\u05d0']", matches) # tokenized after \\
701
701
702 # query using character
702 # query using character
703 _, matches = complete(line_buffer="d['a\u05d0")
703 _, matches = complete(line_buffer="d['a\u05d0")
704 nt.assert_in(u"d['a\u05d0']", matches)
704 nt.assert_in(u"d['a\u05d0']", matches)
705
705
706
706
707
707
708 @dec.skip_without('numpy')
708 @dec.skip_without('numpy')
709 def test_struct_array_key_completion():
709 def test_struct_array_key_completion():
710 """Test dict key completion applies to numpy struct arrays"""
710 """Test dict key completion applies to numpy struct arrays"""
711 import numpy
711 import numpy
712 ip = get_ipython()
712 ip = get_ipython()
713 complete = ip.Completer.complete
713 complete = ip.Completer.complete
714 ip.user_ns['d'] = numpy.array([], dtype=[('hello', 'f'), ('world', 'f')])
714 ip.user_ns['d'] = numpy.array([], dtype=[('hello', 'f'), ('world', 'f')])
715 _, matches = complete(line_buffer="d['")
715 _, matches = complete(line_buffer="d['")
716 nt.assert_in("hello", matches)
716 nt.assert_in("hello", matches)
717 nt.assert_in("world", matches)
717 nt.assert_in("world", matches)
718 # complete on the numpy struct itself
718 # complete on the numpy struct itself
719 dt = numpy.dtype([('my_head', [('my_dt', '>u4'), ('my_df', '>u4')]),
719 dt = numpy.dtype([('my_head', [('my_dt', '>u4'), ('my_df', '>u4')]),
720 ('my_data', '>f4', 5)])
720 ('my_data', '>f4', 5)])
721 x = numpy.zeros(2, dtype=dt)
721 x = numpy.zeros(2, dtype=dt)
722 ip.user_ns['d'] = x[1]
722 ip.user_ns['d'] = x[1]
723 _, matches = complete(line_buffer="d['")
723 _, matches = complete(line_buffer="d['")
724 nt.assert_in("my_head", matches)
724 nt.assert_in("my_head", matches)
725 nt.assert_in("my_data", matches)
725 nt.assert_in("my_data", matches)
726 # complete on a nested level
726 # complete on a nested level
727 with greedy_completion():
727 with greedy_completion():
728 ip.user_ns['d'] = numpy.zeros(2, dtype=dt)
728 ip.user_ns['d'] = numpy.zeros(2, dtype=dt)
729 _, matches = complete(line_buffer="d[1]['my_head']['")
729 _, matches = complete(line_buffer="d[1]['my_head']['")
730 nt.assert_true(any(["my_dt" in m for m in matches]))
730 nt.assert_true(any(["my_dt" in m for m in matches]))
731 nt.assert_true(any(["my_df" in m for m in matches]))
731 nt.assert_true(any(["my_df" in m for m in matches]))
732
732
733
733
734 @dec.skip_without('pandas')
734 @dec.skip_without('pandas')
735 def test_dataframe_key_completion():
735 def test_dataframe_key_completion():
736 """Test dict key completion applies to pandas DataFrames"""
736 """Test dict key completion applies to pandas DataFrames"""
737 import pandas
737 import pandas
738 ip = get_ipython()
738 ip = get_ipython()
739 complete = ip.Completer.complete
739 complete = ip.Completer.complete
740 ip.user_ns['d'] = pandas.DataFrame({'hello': [1], 'world': [2]})
740 ip.user_ns['d'] = pandas.DataFrame({'hello': [1], 'world': [2]})
741 _, matches = complete(line_buffer="d['")
741 _, matches = complete(line_buffer="d['")
742 nt.assert_in("hello", matches)
742 nt.assert_in("hello", matches)
743 nt.assert_in("world", matches)
743 nt.assert_in("world", matches)
744
744
745
745
746 def test_dict_key_completion_invalids():
746 def test_dict_key_completion_invalids():
747 """Smoke test of inputs that dict key completion can't handle"""
747 """Smoke test of inputs that dict key completion can't handle"""
748 ip = get_ipython()
748 ip = get_ipython()
749 complete = ip.Completer.complete
749 complete = ip.Completer.complete
750
750
751 ip.user_ns['no_getitem'] = None
751 ip.user_ns['no_getitem'] = None
752 ip.user_ns['no_keys'] = []
752 ip.user_ns['no_keys'] = []
753 ip.user_ns['cant_call_keys'] = dict
753 ip.user_ns['cant_call_keys'] = dict
754 ip.user_ns['empty'] = {}
754 ip.user_ns['empty'] = {}
755 ip.user_ns['d'] = {'abc': 5}
755 ip.user_ns['d'] = {'abc': 5}
756
756
757 _, matches = complete(line_buffer="no_getitem['")
757 _, matches = complete(line_buffer="no_getitem['")
758 _, matches = complete(line_buffer="no_keys['")
758 _, matches = complete(line_buffer="no_keys['")
759 _, matches = complete(line_buffer="cant_call_keys['")
759 _, matches = complete(line_buffer="cant_call_keys['")
760 _, matches = complete(line_buffer="empty['")
760 _, matches = complete(line_buffer="empty['")
761 _, matches = complete(line_buffer="name_error['")
761 _, matches = complete(line_buffer="name_error['")
762 _, matches = complete(line_buffer="d['\\") # incomplete escape
762 _, matches = complete(line_buffer="d['\\") # incomplete escape
763
763
764 class KeyCompletable(object):
764 class KeyCompletable(object):
765 def __init__(self, things=()):
765 def __init__(self, things=()):
766 self.things = things
766 self.things = things
767
767
768 def _ipy_key_completions_(self):
768 def _ipython_key_completions_(self):
769 return list(self.things)
769 return list(self.things)
770
770
771 def test_object_key_completion():
771 def test_object_key_completion():
772 ip = get_ipython()
772 ip = get_ipython()
773 ip.user_ns['key_completable'] = KeyCompletable(['qwerty', 'qwick'])
773 ip.user_ns['key_completable'] = KeyCompletable(['qwerty', 'qwick'])
774
774
775 _, matches = ip.Completer.complete(line_buffer="key_completable['qw")
775 _, matches = ip.Completer.complete(line_buffer="key_completable['qw")
776 nt.assert_in('qwerty', matches)
776 nt.assert_in('qwerty', matches)
777 nt.assert_in('qwick', matches)
777 nt.assert_in('qwick', matches)
778
778
779
779
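This is the hook the changeset renames: any object can advertise its own key completions by defining _ipython_key_completions_, exactly as KeyCompletable does above. A minimal sketch with an illustrative class name:

    class ColumnStore(object):
        """Hypothetical container whose keys the completer cannot discover by itself."""
        def __init__(self, columns):
            self._columns = dict.fromkeys(columns)

        def __getitem__(self, name):
            return self._columns[name]

        def _ipython_key_completions_(self):
            # Return an iterable of candidate keys; IPython takes care of quoting.
            return list(self._columns)

    ip = get_ipython()
    ip.user_ns['store'] = ColumnStore(['qwerty', 'qwick'])
    _, matches = ip.Completer.complete(line_buffer="store['qw")
    # both 'qwerty' and 'qwick' should be offered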
780 def test_aimport_module_completer():
780 def test_aimport_module_completer():
781 ip = get_ipython()
781 ip = get_ipython()
782 _, matches = ip.complete('i', '%aimport i')
782 _, matches = ip.complete('i', '%aimport i')
783 nt.assert_in('io', matches)
783 nt.assert_in('io', matches)
784 nt.assert_not_in('int', matches)
784 nt.assert_not_in('int', matches)
785
785
786 def test_import_module_completer():
786 def test_import_module_completer():
787 ip = get_ipython()
787 ip = get_ipython()
788 _, matches = ip.complete('i', 'import i')
788 _, matches = ip.complete('i', 'import i')
789 nt.assert_in('io', matches)
789 nt.assert_in('io', matches)
790 nt.assert_not_in('int', matches)
790 nt.assert_not_in('int', matches)
791
791
792 def test_from_module_completer():
792 def test_from_module_completer():
793 ip = get_ipython()
793 ip = get_ipython()
794 _, matches = ip.complete('B', 'from io import B')
794 _, matches = ip.complete('B', 'from io import B')
795 nt.assert_in('BytesIO', matches)
795 nt.assert_in('BytesIO', matches)
796 nt.assert_not_in('BaseException', matches)
796 nt.assert_not_in('BaseException', matches)
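Finally, a hedged sketch of what the three module-completer tests exercise: the same prefix completes differently depending on the surrounding import statement (same assumed test shell):

    ip = get_ipython()
    _, matches = ip.complete('i', 'import i')          # module names: 'io' offered, builtin 'int' not
    _, matches = ip.complete('B', 'from io import B')  # names inside io: 'BytesIO' offered,
                                                       # 'BaseException' not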