Allow objects to define their own key completions...
Thomas Kluyver
@@ -1,1272 +1,1289 b''
1 # encoding: utf-8
1 # encoding: utf-8
2 """Word completion for IPython.
2 """Word completion for IPython.
3
3
4 This module is a fork of the rlcompleter module in the Python standard
4 This module is a fork of the rlcompleter module in the Python standard
5 library. The original enhancements made to rlcompleter have been sent
5 library. The original enhancements made to rlcompleter have been sent
6 upstream and were accepted as of Python 2.3, but we need a lot more
6 upstream and were accepted as of Python 2.3, but we need a lot more
7 functionality specific to IPython, so this module will continue to live as an
7 functionality specific to IPython, so this module will continue to live as an
8 IPython-specific utility.
8 IPython-specific utility.
9
9
10 Original rlcompleter documentation:
10 Original rlcompleter documentation:
11
11
12 This requires the latest extension to the readline module. The completer
12 This requires the latest extension to the readline module. The completer
13 completes keywords, built-ins and globals in __main__; when completing
13 completes keywords, built-ins and globals in __main__; when completing
14 NAME.NAME..., it evaluates (!) the expression up to the last dot and
14 NAME.NAME..., it evaluates (!) the expression up to the last dot and
15 completes its attributes.
15 completes its attributes.
16
16
17 It's very cool to do "import string" type "string.", hit the
17 It's very cool to do "import string" type "string.", hit the
18 completion key (twice), and see the list of names defined by the
18 completion key (twice), and see the list of names defined by the
19 string module!
19 string module!
20
20
21 Tip: to use the tab key as the completion key, call
21 Tip: to use the tab key as the completion key, call
22
22
23 readline.parse_and_bind("tab: complete")
23 readline.parse_and_bind("tab: complete")
24
24
25 Notes:
25 Notes:
26
26
27 - Exceptions raised by the completer function are *ignored* (and
27 - Exceptions raised by the completer function are *ignored* (and
28 generally cause the completion to fail). This is a feature -- since
28 generally cause the completion to fail). This is a feature -- since
29 readline sets the tty device in raw (or cbreak) mode, printing a
29 readline sets the tty device in raw (or cbreak) mode, printing a
30 traceback wouldn't work well without some complicated hoopla to save,
30 traceback wouldn't work well without some complicated hoopla to save,
31 reset and restore the tty state.
31 reset and restore the tty state.
32
32
33 - The evaluation of the NAME.NAME... form may cause arbitrary
33 - The evaluation of the NAME.NAME... form may cause arbitrary
34 application defined code to be executed if an object with a
34 application defined code to be executed if an object with a
35 ``__getattr__`` hook is found. Since it is the responsibility of the
35 ``__getattr__`` hook is found. Since it is the responsibility of the
36 application (or the user) to enable this feature, I consider this an
36 application (or the user) to enable this feature, I consider this an
37 acceptable risk. More complicated expressions (e.g. function calls or
37 acceptable risk. More complicated expressions (e.g. function calls or
38 indexing operations) are *not* evaluated.
38 indexing operations) are *not* evaluated.
39
39
40 - GNU readline is also used by the built-in functions input() and
40 - GNU readline is also used by the built-in functions input() and
41 raw_input(), and thus these also benefit/suffer from the completer
41 raw_input(), and thus these also benefit/suffer from the completer
42 features. Clearly an interactive application can benefit by
42 features. Clearly an interactive application can benefit by
43 specifying its own completer function and using raw_input() for all
43 specifying its own completer function and using raw_input() for all
44 its input.
44 its input.
45
45
46 - When the original stdin is not a tty device, GNU readline is never
46 - When the original stdin is not a tty device, GNU readline is never
47 used, and this module (and the readline module) are silently inactive.
47 used, and this module (and the readline module) are silently inactive.
48 """
48 """
49
49
50 # Copyright (c) IPython Development Team.
50 # Copyright (c) IPython Development Team.
51 # Distributed under the terms of the Modified BSD License.
51 # Distributed under the terms of the Modified BSD License.
52 #
52 #
53 # Some of this code originated from rlcompleter in the Python standard library
53 # Some of this code originated from rlcompleter in the Python standard library
54 # Copyright (C) 2001 Python Software Foundation, www.python.org
54 # Copyright (C) 2001 Python Software Foundation, www.python.org
55
55
56 import __main__
56 import __main__
57 import glob
57 import glob
58 import inspect
58 import inspect
59 import itertools
59 import itertools
60 import keyword
60 import keyword
61 import os
61 import os
62 import re
62 import re
63 import sys
63 import sys
64 import unicodedata
64 import unicodedata
65 import string
65 import string
66
66
67 from traitlets.config.configurable import Configurable
67 from traitlets.config.configurable import Configurable
68 from IPython.core.error import TryNext
68 from IPython.core.error import TryNext
69 from IPython.core.inputsplitter import ESC_MAGIC
69 from IPython.core.inputsplitter import ESC_MAGIC
70 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
70 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
71 from IPython.utils import generics
71 from IPython.utils import generics
72 from IPython.utils import io
72 from IPython.utils import io
73 from IPython.utils.decorators import undoc
73 from IPython.utils.decorators import undoc
74 from IPython.utils.dir2 import dir2
74 from IPython.utils.dir2 import dir2, safe_hasattr
75 from IPython.utils.process import arg_split
75 from IPython.utils.process import arg_split
76 from IPython.utils.py3compat import builtin_mod, string_types, PY3
76 from IPython.utils.py3compat import builtin_mod, string_types, PY3
77 from traitlets import CBool, Enum
77 from traitlets import CBool, Enum
78
78
79 #-----------------------------------------------------------------------------
79 #-----------------------------------------------------------------------------
80 # Globals
80 # Globals
81 #-----------------------------------------------------------------------------
81 #-----------------------------------------------------------------------------
82
82
83 # Public API
83 # Public API
84 __all__ = ['Completer','IPCompleter']
84 __all__ = ['Completer','IPCompleter']
85
85
86 if sys.platform == 'win32':
86 if sys.platform == 'win32':
87 PROTECTABLES = ' '
87 PROTECTABLES = ' '
88 else:
88 else:
89 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
89 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
90
90
91
91
92 #-----------------------------------------------------------------------------
92 #-----------------------------------------------------------------------------
93 # Main functions and classes
93 # Main functions and classes
94 #-----------------------------------------------------------------------------
94 #-----------------------------------------------------------------------------
95
95
96 def has_open_quotes(s):
96 def has_open_quotes(s):
97 """Return whether a string has open quotes.
97 """Return whether a string has open quotes.
98
98
99 This simply counts whether the number of quote characters of either type in
99 This simply counts whether the number of quote characters of either type in
100 the string is odd.
100 the string is odd.
101
101
102 Returns
102 Returns
103 -------
103 -------
104 If there is an open quote, the quote character is returned. Else, return
104 If there is an open quote, the quote character is returned. Else, return
105 False.
105 False.
106 """
106 """
107 # We check " first, then ', so complex cases with nested quotes will get
107 # We check " first, then ', so complex cases with nested quotes will get
108 # the " to take precedence.
108 # the " to take precedence.
109 if s.count('"') % 2:
109 if s.count('"') % 2:
110 return '"'
110 return '"'
111 elif s.count("'") % 2:
111 elif s.count("'") % 2:
112 return "'"
112 return "'"
113 else:
113 else:
114 return False
114 return False
115
115
116
116
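A quick illustration of has_open_quotes as described above (a rough sketch; assumes this module is importable as IPython.core.completer):

    from IPython.core.completer import has_open_quotes

    print(has_open_quotes('print("foo'))      # '"'  -- one unbalanced double quote
    print(has_open_quotes("x = 'a' + 'b'"))   # False -- all quotes are paired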
117 def protect_filename(s):
117 def protect_filename(s):
118 """Escape a string to protect certain characters."""
118 """Escape a string to protect certain characters."""
119
119
120 return "".join([(ch in PROTECTABLES and '\\' + ch or ch)
120 return "".join([(ch in PROTECTABLES and '\\' + ch or ch)
121 for ch in s])
121 for ch in s])
122
122
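For reference, protect_filename backslash-escapes the PROTECTABLES characters so readline does not treat them as delimiters (illustrative, POSIX character set):

    from IPython.core.completer import protect_filename

    print(protect_filename('My Documents/notes (draft).txt'))
    # My\ Documents/notes\ \(draft\).txt   (on Windows only the space is escaped)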
123 def expand_user(path):
123 def expand_user(path):
124 """Expand '~'-style usernames in strings.
124 """Expand '~'-style usernames in strings.
125
125
126 This is similar to :func:`os.path.expanduser`, but it computes and returns
126 This is similar to :func:`os.path.expanduser`, but it computes and returns
127 extra information that will be useful if the input was being used in
127 extra information that will be useful if the input was being used in
128 computing completions, and you wish to return the completions with the
128 computing completions, and you wish to return the completions with the
129 original '~' instead of its expanded value.
129 original '~' instead of its expanded value.
130
130
131 Parameters
131 Parameters
132 ----------
132 ----------
133 path : str
133 path : str
134 String to be expanded. If no ~ is present, the output is the same as the
134 String to be expanded. If no ~ is present, the output is the same as the
135 input.
135 input.
136
136
137 Returns
137 Returns
138 -------
138 -------
139 newpath : str
139 newpath : str
140 Result of ~ expansion in the input path.
140 Result of ~ expansion in the input path.
141 tilde_expand : bool
141 tilde_expand : bool
142 Whether any expansion was performed or not.
142 Whether any expansion was performed or not.
143 tilde_val : str
143 tilde_val : str
144 The value that ~ was replaced with.
144 The value that ~ was replaced with.
145 """
145 """
146 # Default values
146 # Default values
147 tilde_expand = False
147 tilde_expand = False
148 tilde_val = ''
148 tilde_val = ''
149 newpath = path
149 newpath = path
150
150
151 if path.startswith('~'):
151 if path.startswith('~'):
152 tilde_expand = True
152 tilde_expand = True
153 rest = len(path)-1
153 rest = len(path)-1
154 newpath = os.path.expanduser(path)
154 newpath = os.path.expanduser(path)
155 if rest:
155 if rest:
156 tilde_val = newpath[:-rest]
156 tilde_val = newpath[:-rest]
157 else:
157 else:
158 tilde_val = newpath
158 tilde_val = newpath
159
159
160 return newpath, tilde_expand, tilde_val
160 return newpath, tilde_expand, tilde_val
161
161
162
162
163 def compress_user(path, tilde_expand, tilde_val):
163 def compress_user(path, tilde_expand, tilde_val):
164 """Does the opposite of expand_user, with its outputs.
164 """Does the opposite of expand_user, with its outputs.
165 """
165 """
166 if tilde_expand:
166 if tilde_expand:
167 return path.replace(tilde_val, '~')
167 return path.replace(tilde_val, '~')
168 else:
168 else:
169 return path
169 return path
170
170
171
171
172
172
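The expand_user / compress_user pair round-trips as described; a small sketch (the expanded value depends on the current $HOME, so the paths in the comments are only an example):

    from IPython.core.completer import expand_user, compress_user

    newpath, did_expand, tilde_val = expand_user('~/notebooks')
    # e.g. ('/home/me/notebooks', True, '/home/me')
    completion = newpath + '/analysis.ipynb'
    print(compress_user(completion, did_expand, tilde_val))
    # '~/notebooks/analysis.ipynb'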
173 def completions_sorting_key(word):
173 def completions_sorting_key(word):
174 """key for sorting completions
174 """key for sorting completions
175
175
176 This does several things:
176 This does several things:
177
177
178 - Lowercase all completions, so they are sorted alphabetically with
178 - Lowercase all completions, so they are sorted alphabetically with
179 upper and lower case words mingled
179 upper and lower case words mingled
180 - Demote any completions starting with underscores to the end
180 - Demote any completions starting with underscores to the end
181 - Insert any %magic and %%cellmagic completions in the alphabetical order
181 - Insert any %magic and %%cellmagic completions in the alphabetical order
182 by their name
182 by their name
183 """
183 """
184 # Case insensitive sort
184 # Case insensitive sort
185 word = word.lower()
185 word = word.lower()
186
186
187 prio1, prio2 = 0, 0
187 prio1, prio2 = 0, 0
188
188
189 if word.startswith('__'):
189 if word.startswith('__'):
190 prio1 = 2
190 prio1 = 2
191 elif word.startswith('_'):
191 elif word.startswith('_'):
192 prio1 = 1
192 prio1 = 1
193
193
194 if word.startswith('%%'):
194 if word.startswith('%%'):
195 # If there's another % in there, this is something else, so leave it alone
195 # If there's another % in there, this is something else, so leave it alone
196 if not "%" in word[2:]:
196 if not "%" in word[2:]:
197 word = word[2:]
197 word = word[2:]
198 prio2 = 2
198 prio2 = 2
199 elif word.startswith('%'):
199 elif word.startswith('%'):
200 if not "%" in word[1:]:
200 if not "%" in word[1:]:
201 word = word[1:]
201 word = word[1:]
202 prio2 = 1
202 prio2 = 1
203
203
204 return prio1, word, prio2
204 return prio1, word, prio2
205
205
206
206
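A quick check of the ordering rules listed in the docstring: magics are interleaved by their bare name and underscore names sink to the end, case-insensitively (sketch):

    from IPython.core.completer import completions_sorting_key

    words = ['_private', 'alpha', '%%timeit', 'Beta', '%magic']
    print(sorted(words, key=completions_sorting_key))
    # ['alpha', 'Beta', '%magic', '%%timeit', '_private']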
207 @undoc
207 @undoc
208 class Bunch(object): pass
208 class Bunch(object): pass
209
209
210
210
211 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
211 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
212 GREEDY_DELIMS = ' =\r\n'
212 GREEDY_DELIMS = ' =\r\n'
213
213
214
214
215 class CompletionSplitter(object):
215 class CompletionSplitter(object):
216 """An object to split an input line in a manner similar to readline.
216 """An object to split an input line in a manner similar to readline.
217
217
218 By having our own implementation, we can expose readline-like completion in
218 By having our own implementation, we can expose readline-like completion in
219 a uniform manner to all frontends. This object only needs to be given the
219 a uniform manner to all frontends. This object only needs to be given the
220 line of text to be split and the cursor position on said line, and it
220 line of text to be split and the cursor position on said line, and it
221 returns the 'word' to be completed on at the cursor after splitting the
221 returns the 'word' to be completed on at the cursor after splitting the
222 entire line.
222 entire line.
223
223
224 What characters are used as splitting delimiters can be controlled by
224 What characters are used as splitting delimiters can be controlled by
225 setting the `delims` attribute (this is a property that internally
225 setting the `delims` attribute (this is a property that internally
226 automatically builds the necessary regular expression)"""
226 automatically builds the necessary regular expression)"""
227
227
228 # Private interface
228 # Private interface
229
229
230 # A string of delimiter characters. The default value makes sense for
230 # A string of delimiter characters. The default value makes sense for
231 # IPython's most typical usage patterns.
231 # IPython's most typical usage patterns.
232 _delims = DELIMS
232 _delims = DELIMS
233
233
234 # The expression (a normal string) to be compiled into a regular expression
234 # The expression (a normal string) to be compiled into a regular expression
235 # for actual splitting. We store it as an attribute mostly for ease of
235 # for actual splitting. We store it as an attribute mostly for ease of
236 # debugging, since this type of code can be so tricky to debug.
236 # debugging, since this type of code can be so tricky to debug.
237 _delim_expr = None
237 _delim_expr = None
238
238
239 # The regular expression that does the actual splitting
239 # The regular expression that does the actual splitting
240 _delim_re = None
240 _delim_re = None
241
241
242 def __init__(self, delims=None):
242 def __init__(self, delims=None):
243 delims = CompletionSplitter._delims if delims is None else delims
243 delims = CompletionSplitter._delims if delims is None else delims
244 self.delims = delims
244 self.delims = delims
245
245
246 @property
246 @property
247 def delims(self):
247 def delims(self):
248 """Return the string of delimiter characters."""
248 """Return the string of delimiter characters."""
249 return self._delims
249 return self._delims
250
250
251 @delims.setter
251 @delims.setter
252 def delims(self, delims):
252 def delims(self, delims):
253 """Set the delimiters for line splitting."""
253 """Set the delimiters for line splitting."""
254 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
254 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
255 self._delim_re = re.compile(expr)
255 self._delim_re = re.compile(expr)
256 self._delims = delims
256 self._delims = delims
257 self._delim_expr = expr
257 self._delim_expr = expr
258
258
259 def split_line(self, line, cursor_pos=None):
259 def split_line(self, line, cursor_pos=None):
260 """Split a line of text with a cursor at the given position.
260 """Split a line of text with a cursor at the given position.
261 """
261 """
262 l = line if cursor_pos is None else line[:cursor_pos]
262 l = line if cursor_pos is None else line[:cursor_pos]
263 return self._delim_re.split(l)[-1]
263 return self._delim_re.split(l)[-1]
264
264
265
265
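split_line only returns the fragment to complete, i.e. whatever follows the last delimiter before the cursor; a minimal sketch:

    from IPython.core.completer import CompletionSplitter

    sp = CompletionSplitter()
    print(sp.split_line('print(result.va'))                  # 'result.va'
    print(sp.split_line('foo(bar.baz, qux', cursor_pos=11))  # 'bar.baz' -- text after the cursor is ignored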
266 class Completer(Configurable):
266 class Completer(Configurable):
267
267
268 greedy = CBool(False, config=True,
268 greedy = CBool(False, config=True,
269 help="""Activate greedy completion
269 help="""Activate greedy completion
270
270
271 This will enable completion on elements of lists, results of function calls, etc.,
271 This will enable completion on elements of lists, results of function calls, etc.,
272 but can be unsafe because the code is actually evaluated on TAB.
272 but can be unsafe because the code is actually evaluated on TAB.
273 """
273 """
274 )
274 )
275
275
276
276
277 def __init__(self, namespace=None, global_namespace=None, **kwargs):
277 def __init__(self, namespace=None, global_namespace=None, **kwargs):
278 """Create a new completer for the command line.
278 """Create a new completer for the command line.
279
279
280 Completer(namespace=ns,global_namespace=ns2) -> completer instance.
280 Completer(namespace=ns,global_namespace=ns2) -> completer instance.
281
281
282 If unspecified, the default namespace where completions are performed
282 If unspecified, the default namespace where completions are performed
283 is __main__ (technically, __main__.__dict__). Namespaces should be
283 is __main__ (technically, __main__.__dict__). Namespaces should be
284 given as dictionaries.
284 given as dictionaries.
285
285
286 An optional second namespace can be given. This allows the completer
286 An optional second namespace can be given. This allows the completer
287 to handle cases where both the local and global scopes need to be
287 to handle cases where both the local and global scopes need to be
288 distinguished.
288 distinguished.
289
289
290 Completer instances should be used as the completion mechanism of
290 Completer instances should be used as the completion mechanism of
291 readline via the set_completer() call:
291 readline via the set_completer() call:
292
292
293 readline.set_completer(Completer(my_namespace).complete)
293 readline.set_completer(Completer(my_namespace).complete)
294 """
294 """
295
295
296 # Don't bind to namespace quite yet, but flag whether the user wants a
296 # Don't bind to namespace quite yet, but flag whether the user wants a
297 # specific namespace or to use __main__.__dict__. This will allow us
297 # specific namespace or to use __main__.__dict__. This will allow us
298 # to bind to __main__.__dict__ at completion time, not now.
298 # to bind to __main__.__dict__ at completion time, not now.
299 if namespace is None:
299 if namespace is None:
300 self.use_main_ns = 1
300 self.use_main_ns = 1
301 else:
301 else:
302 self.use_main_ns = 0
302 self.use_main_ns = 0
303 self.namespace = namespace
303 self.namespace = namespace
304
304
305 # The global namespace, if given, can be bound directly
305 # The global namespace, if given, can be bound directly
306 if global_namespace is None:
306 if global_namespace is None:
307 self.global_namespace = {}
307 self.global_namespace = {}
308 else:
308 else:
309 self.global_namespace = global_namespace
309 self.global_namespace = global_namespace
310
310
311 super(Completer, self).__init__(**kwargs)
311 super(Completer, self).__init__(**kwargs)
312
312
313 def complete(self, text, state):
313 def complete(self, text, state):
314 """Return the next possible completion for 'text'.
314 """Return the next possible completion for 'text'.
315
315
316 This is called successively with state == 0, 1, 2, ... until it
316 This is called successively with state == 0, 1, 2, ... until it
317 returns None. The completion should begin with 'text'.
317 returns None. The completion should begin with 'text'.
318
318
319 """
319 """
320 if self.use_main_ns:
320 if self.use_main_ns:
321 self.namespace = __main__.__dict__
321 self.namespace = __main__.__dict__
322
322
323 if state == 0:
323 if state == 0:
324 if "." in text:
324 if "." in text:
325 self.matches = self.attr_matches(text)
325 self.matches = self.attr_matches(text)
326 else:
326 else:
327 self.matches = self.global_matches(text)
327 self.matches = self.global_matches(text)
328 try:
328 try:
329 return self.matches[state]
329 return self.matches[state]
330 except IndexError:
330 except IndexError:
331 return None
331 return None
332
332
333 def global_matches(self, text):
333 def global_matches(self, text):
334 """Compute matches when text is a simple name.
334 """Compute matches when text is a simple name.
335
335
336 Return a list of all keywords, built-in functions and names currently
336 Return a list of all keywords, built-in functions and names currently
337 defined in self.namespace or self.global_namespace that match.
337 defined in self.namespace or self.global_namespace that match.
338
338
339 """
339 """
340 #print 'Completer->global_matches, txt=%r' % text # dbg
340 #print 'Completer->global_matches, txt=%r' % text # dbg
341 matches = []
341 matches = []
342 match_append = matches.append
342 match_append = matches.append
343 n = len(text)
343 n = len(text)
344 for lst in [keyword.kwlist,
344 for lst in [keyword.kwlist,
345 builtin_mod.__dict__.keys(),
345 builtin_mod.__dict__.keys(),
346 self.namespace.keys(),
346 self.namespace.keys(),
347 self.global_namespace.keys()]:
347 self.global_namespace.keys()]:
348 for word in lst:
348 for word in lst:
349 if word[:n] == text and word != "__builtins__":
349 if word[:n] == text and word != "__builtins__":
350 match_append(word)
350 match_append(word)
351 return matches
351 return matches
352
352
353 def attr_matches(self, text):
353 def attr_matches(self, text):
354 """Compute matches when text contains a dot.
354 """Compute matches when text contains a dot.
355
355
356 Assuming the text is of the form NAME.NAME....[NAME], and is
356 Assuming the text is of the form NAME.NAME....[NAME], and is
357 evaluatable in self.namespace or self.global_namespace, it will be
357 evaluatable in self.namespace or self.global_namespace, it will be
358 evaluated and its attributes (as revealed by dir()) are used as
358 evaluated and its attributes (as revealed by dir()) are used as
359 possible completions. (For class instances, class members are
359 possible completions. (For class instances, class members are
360 also considered.)
360 also considered.)
361
361
362 WARNING: this can still invoke arbitrary C code, if an object
362 WARNING: this can still invoke arbitrary C code, if an object
363 with a __getattr__ hook is evaluated.
363 with a __getattr__ hook is evaluated.
364
364
365 """
365 """
366
366
367 #io.rprint('Completer->attr_matches, txt=%r' % text) # dbg
367 #io.rprint('Completer->attr_matches, txt=%r' % text) # dbg
368 # Another option, seems to work great. Catches things like ''.<tab>
368 # Another option, seems to work great. Catches things like ''.<tab>
369 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
369 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
370
370
371 if m:
371 if m:
372 expr, attr = m.group(1, 3)
372 expr, attr = m.group(1, 3)
373 elif self.greedy:
373 elif self.greedy:
374 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
374 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
375 if not m2:
375 if not m2:
376 return []
376 return []
377 expr, attr = m2.group(1,2)
377 expr, attr = m2.group(1,2)
378 else:
378 else:
379 return []
379 return []
380
380
381 try:
381 try:
382 obj = eval(expr, self.namespace)
382 obj = eval(expr, self.namespace)
383 except:
383 except:
384 try:
384 try:
385 obj = eval(expr, self.global_namespace)
385 obj = eval(expr, self.global_namespace)
386 except:
386 except:
387 return []
387 return []
388
388
389 if self.limit_to__all__ and hasattr(obj, '__all__'):
389 if self.limit_to__all__ and hasattr(obj, '__all__'):
390 words = get__all__entries(obj)
390 words = get__all__entries(obj)
391 else:
391 else:
392 words = dir2(obj)
392 words = dir2(obj)
393
393
394 try:
394 try:
395 words = generics.complete_object(obj, words)
395 words = generics.complete_object(obj, words)
396 except TryNext:
396 except TryNext:
397 pass
397 pass
398 except Exception:
398 except Exception:
399 # Silence errors from completion function
399 # Silence errors from completion function
400 #raise # dbg
400 #raise # dbg
401 pass
401 pass
402 # Build match list to return
402 # Build match list to return
403 n = len(attr)
403 n = len(attr)
404 res = ["%s.%s" % (expr, w) for w in words if w[:n] == attr ]
404 res = ["%s.%s" % (expr, w) for w in words if w[:n] == attr ]
405 return res
405 return res
406
406
407
407
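The base Completer follows the readline complete(text, state) protocol described in its docstring; a rough sketch against an explicit namespace:

    from IPython.core.completer import Completer

    ns = {'alpha': 1, 'alphabet': 'abc'}
    c = Completer(namespace=ns)

    state, results = 0, []
    while True:
        m = c.complete('alph', state)
        if m is None:
            break
        results.append(m)
        state += 1
    print(results)   # ['alpha', 'alphabet'] (order may vary)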
408 def get__all__entries(obj):
408 def get__all__entries(obj):
409 """returns the strings in the __all__ attribute"""
409 """returns the strings in the __all__ attribute"""
410 try:
410 try:
411 words = getattr(obj, '__all__')
411 words = getattr(obj, '__all__')
412 except:
412 except:
413 return []
413 return []
414
414
415 return [w for w in words if isinstance(w, string_types)]
415 return [w for w in words if isinstance(w, string_types)]
416
416
417
417
418 def match_dict_keys(keys, prefix, delims):
418 def match_dict_keys(keys, prefix, delims):
419 """Used by dict_key_matches, matching the prefix to a list of keys"""
419 """Used by dict_key_matches, matching the prefix to a list of keys"""
420 if not prefix:
420 if not prefix:
421 return None, 0, [repr(k) for k in keys
421 return None, 0, [repr(k) for k in keys
422 if isinstance(k, (string_types, bytes))]
422 if isinstance(k, (string_types, bytes))]
423 quote_match = re.search('["\']', prefix)
423 quote_match = re.search('["\']', prefix)
424 quote = quote_match.group()
424 quote = quote_match.group()
425 try:
425 try:
426 prefix_str = eval(prefix + quote, {})
426 prefix_str = eval(prefix + quote, {})
427 except Exception:
427 except Exception:
428 return None, 0, []
428 return None, 0, []
429
429
430 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
430 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
431 token_match = re.search(pattern, prefix, re.UNICODE)
431 token_match = re.search(pattern, prefix, re.UNICODE)
432 token_start = token_match.start()
432 token_start = token_match.start()
433 token_prefix = token_match.group()
433 token_prefix = token_match.group()
434
434
435 # TODO: support bytes in Py3k
435 # TODO: support bytes in Py3k
436 matched = []
436 matched = []
437 for key in keys:
437 for key in keys:
438 try:
438 try:
439 if not key.startswith(prefix_str):
439 if not key.startswith(prefix_str):
440 continue
440 continue
441 except (AttributeError, TypeError, UnicodeError):
441 except (AttributeError, TypeError, UnicodeError):
442 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
442 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
443 continue
443 continue
444
444
445 # reformat remainder of key to begin with prefix
445 # reformat remainder of key to begin with prefix
446 rem = key[len(prefix_str):]
446 rem = key[len(prefix_str):]
447 # force repr wrapped in '
447 # force repr wrapped in '
448 rem_repr = repr(rem + '"')
448 rem_repr = repr(rem + '"')
449 if rem_repr.startswith('u') and prefix[0] not in 'uU':
449 if rem_repr.startswith('u') and prefix[0] not in 'uU':
450 # Found key is unicode, but prefix is Py2 string.
450 # Found key is unicode, but prefix is Py2 string.
451 # Therefore attempt to interpret key as string.
451 # Therefore attempt to interpret key as string.
452 try:
452 try:
453 rem_repr = repr(rem.encode('ascii') + '"')
453 rem_repr = repr(rem.encode('ascii') + '"')
454 except UnicodeEncodeError:
454 except UnicodeEncodeError:
455 continue
455 continue
456
456
457 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
457 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
458 if quote == '"':
458 if quote == '"':
459 # The entered prefix is quoted with ",
459 # The entered prefix is quoted with ",
460 # but the match is quoted with '.
460 # but the match is quoted with '.
461 # A contained " hence needs escaping for comparison:
461 # A contained " hence needs escaping for comparison:
462 rem_repr = rem_repr.replace('"', '\\"')
462 rem_repr = rem_repr.replace('"', '\\"')
463
463
464 # then reinsert prefix from start of token
464 # then reinsert prefix from start of token
465 matched.append('%s%s' % (token_prefix, rem_repr))
465 matched.append('%s%s' % (token_prefix, rem_repr))
466 return quote, token_start, matched
466 return quote, token_start, matched
467
467
468
468
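Roughly how match_dict_keys behaves (Python 3; the closing quote is added later by dict_key_matches, so matches are returned without it):

    from IPython.core.completer import match_dict_keys, DELIMS

    print(match_dict_keys(['alpha', 'beta'], '', DELIMS))
    # (None, 0, ["'alpha'", "'beta'"])
    print(match_dict_keys(['alpha', 'beta'], "'al", DELIMS))
    # ("'", 1, ['alpha'])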
469 def _safe_isinstance(obj, module, class_name):
469 def _safe_isinstance(obj, module, class_name):
470 """Checks if obj is an instance of module.class_name if loaded
470 """Checks if obj is an instance of module.class_name if loaded
471 """
471 """
472 return (module in sys.modules and
472 return (module in sys.modules and
473 isinstance(obj, getattr(__import__(module), class_name)))
473 isinstance(obj, getattr(__import__(module), class_name)))
474
474
475 def _safe_really_hasattr(obj, name):
476 """Checks that an object genuinely has a given attribute.
477
478 Some objects claim to have any attribute that's requested, to act as a lazy
479 proxy for something else. We want to catch these cases and ignore their
480 claim to have the attribute we're interested in.
481 """
482 if safe_hasattr(obj, '_ipy_proxy_check_dont_define_this_'):
483 # If it claims this exists, don't trust it
484 return False
485
486 return safe_hasattr(obj, name)
475
487
476
488
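This helper is what lets dict_key_matches (further down) trust an object's claim to implement the new _ipy_key_completions_() hook. A sketch of both sides, using illustrative classes, once this change is applied:

    from IPython.core.completer import _safe_really_hasattr

    class EagerProxy(object):
        """Claims to have every attribute, like some lazy/remote proxies."""
        def __getattr__(self, name):
            return lambda: []

    class Catalogue(object):
        """Toy container that advertises its own key completions."""
        def __getitem__(self, key):
            return 42
        def _ipy_key_completions_(self):
            return ['temperature', 'pressure']

    print(_safe_really_hasattr(EagerProxy(), '_ipy_key_completions_'))
    # False: the proxy also "has" the dummy _ipy_proxy_check_dont_define_this_
    # attribute, so its claim is not trusted
    print(_safe_really_hasattr(Catalogue(), '_ipy_key_completions_'))
    # True: completing cat['<TAB> will offer 'temperature' and 'pressure'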
477 def back_unicode_name_matches(text):
489 def back_unicode_name_matches(text):
478 u"""Match unicode characters back to unicode name
490 u"""Match unicode characters back to unicode name
479
491
480 This does ☃ -> \\snowman
492 This does ☃ -> \\snowman
481
493
482 Note that snowman is not a valid python3 combining character, but it will be expanded.
494 Note that snowman is not a valid python3 combining character, but it will be expanded.
483 It will not, however, be recombined back into the snowman character by the completion machinery.
495 It will not, however, be recombined back into the snowman character by the completion machinery.
484
496
485 Nor will this back-complete standard escape sequences like \\n, \\b ...
497 Nor will this back-complete standard escape sequences like \\n, \\b ...
486
498
487 Used on Python 3 only.
499 Used on Python 3 only.
488 """
500 """
489 if len(text)<2:
501 if len(text)<2:
490 return u'', ()
502 return u'', ()
491 maybe_slash = text[-2]
503 maybe_slash = text[-2]
492 if maybe_slash != '\\':
504 if maybe_slash != '\\':
493 return u'', ()
505 return u'', ()
494
506
495 char = text[-1]
507 char = text[-1]
496 # no expand on quote for completion in strings.
508 # no expand on quote for completion in strings.
497 # nor backcomplete standard ascii keys
509 # nor backcomplete standard ascii keys
498 if char in string.ascii_letters or char in ['"',"'"]:
510 if char in string.ascii_letters or char in ['"',"'"]:
499 return u'', ()
511 return u'', ()
500 try :
512 try :
501 unic = unicodedata.name(char)
513 unic = unicodedata.name(char)
502 return '\\'+char,['\\'+unic]
514 return '\\'+char,['\\'+unic]
503 except KeyError as e:
515 except KeyError as e:
504 pass
516 pass
505 return u'', ()
517 return u'', ()
506
518
507 def back_latex_name_matches(text):
519 def back_latex_name_matches(text):
508 u"""Match latex characters back to unicode name
520 u"""Match latex characters back to unicode name
509
521
510 This does √ -> \\sqrt
522 This does √ -> \\sqrt
511
523
512 Used on Python 3 only.
524 Used on Python 3 only.
513 """
525 """
514 if len(text)<2:
526 if len(text)<2:
515 return u'', ()
527 return u'', ()
516 maybe_slash = text[-2]
528 maybe_slash = text[-2]
517 if maybe_slash != '\\':
529 if maybe_slash != '\\':
518 return u'', ()
530 return u'', ()
519
531
520
532
521 char = text[-1]
533 char = text[-1]
522 # no expand on quote for completion in strings.
534 # no expand on quote for completion in strings.
523 # nor backcomplete standard ascii keys
535 # nor backcomplete standard ascii keys
524 if char in string.ascii_letters or char in ['"',"'"]:
536 if char in string.ascii_letters or char in ['"',"'"]:
525 return u'', ()
537 return u'', ()
526 try :
538 try :
527 latex = reverse_latex_symbol[char]
539 latex = reverse_latex_symbol[char]
528 # '\\' replace the \ as well
540 # '\\' replace the \ as well
529 return '\\'+char,[latex]
541 return '\\'+char,[latex]
530 except KeyError as e:
542 except KeyError as e:
531 pass
543 pass
532 return u'', ()
544 return u'', ()
533
545
534
546
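Back-completion maps an already-typed character back to an escape sequence the user could retype; a small Python 3 sketch (assumes '\alpha' is present in the latex_symbols table, as it is in the standard distribution):

    from IPython.core.completer import back_unicode_name_matches, back_latex_name_matches

    print(back_unicode_name_matches('\\☃'))   # ('\\☃', ['\\SNOWMAN'])
    print(back_latex_name_matches('\\α'))     # ('\\α', ['\\alpha'])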
535 class IPCompleter(Completer):
547 class IPCompleter(Completer):
536 """Extension of the completer class with IPython-specific features"""
548 """Extension of the completer class with IPython-specific features"""
537
549
538 def _greedy_changed(self, name, old, new):
550 def _greedy_changed(self, name, old, new):
539 """update the splitter and readline delims when greedy is changed"""
551 """update the splitter and readline delims when greedy is changed"""
540 if new:
552 if new:
541 self.splitter.delims = GREEDY_DELIMS
553 self.splitter.delims = GREEDY_DELIMS
542 else:
554 else:
543 self.splitter.delims = DELIMS
555 self.splitter.delims = DELIMS
544
556
545 if self.readline:
557 if self.readline:
546 self.readline.set_completer_delims(self.splitter.delims)
558 self.readline.set_completer_delims(self.splitter.delims)
547
559
548 merge_completions = CBool(True, config=True,
560 merge_completions = CBool(True, config=True,
549 help="""Whether to merge completion results into a single list
561 help="""Whether to merge completion results into a single list
550
562
551 If False, only the completion results from the first non-empty
563 If False, only the completion results from the first non-empty
552 completer will be returned.
564 completer will be returned.
553 """
565 """
554 )
566 )
555 omit__names = Enum((0,1,2), default_value=2, config=True,
567 omit__names = Enum((0,1,2), default_value=2, config=True,
556 help="""Instruct the completer to omit private method names
568 help="""Instruct the completer to omit private method names
557
569
558 Specifically, when completing on ``object.<tab>``.
570 Specifically, when completing on ``object.<tab>``.
559
571
560 When 2 [default]: all names that start with '_' will be excluded.
572 When 2 [default]: all names that start with '_' will be excluded.
561
573
562 When 1: all 'magic' names (``__foo__``) will be excluded.
574 When 1: all 'magic' names (``__foo__``) will be excluded.
563
575
564 When 0: nothing will be excluded.
576 When 0: nothing will be excluded.
565 """
577 """
566 )
578 )
567 limit_to__all__ = CBool(default_value=False, config=True,
579 limit_to__all__ = CBool(default_value=False, config=True,
568 help="""Instruct the completer to use __all__ for the completion
580 help="""Instruct the completer to use __all__ for the completion
569
581
570 Specifically, when completing on ``object.<tab>``.
582 Specifically, when completing on ``object.<tab>``.
571
583
572 When True: only those names in obj.__all__ will be included.
584 When True: only those names in obj.__all__ will be included.
573
585
574 When False [default]: the __all__ attribute is ignored
586 When False [default]: the __all__ attribute is ignored
575 """
587 """
576 )
588 )
577
589
578 def __init__(self, shell=None, namespace=None, global_namespace=None,
590 def __init__(self, shell=None, namespace=None, global_namespace=None,
579 use_readline=True, config=None, **kwargs):
591 use_readline=True, config=None, **kwargs):
580 """IPCompleter() -> completer
592 """IPCompleter() -> completer
581
593
582 Return a completer object suitable for use by the readline library
594 Return a completer object suitable for use by the readline library
583 via readline.set_completer().
595 via readline.set_completer().
584
596
585 Inputs:
597 Inputs:
586
598
587 - shell: a pointer to the ipython shell itself. This is needed
599 - shell: a pointer to the ipython shell itself. This is needed
588 because this completer knows about magic functions, and those can
600 because this completer knows about magic functions, and those can
589 only be accessed via the ipython instance.
601 only be accessed via the ipython instance.
590
602
591 - namespace: an optional dict where completions are performed.
603 - namespace: an optional dict where completions are performed.
592
604
593 - global_namespace: secondary optional dict for completions, to
605 - global_namespace: secondary optional dict for completions, to
594 handle cases (such as IPython embedded inside functions) where
606 handle cases (such as IPython embedded inside functions) where
595 both Python scopes are visible.
607 both Python scopes are visible.
596
608
597 use_readline : bool, optional
609 use_readline : bool, optional
598 If true, use the readline library. This completer can still function
610 If true, use the readline library. This completer can still function
599 without readline, though in that case callers must provide some extra
611 without readline, though in that case callers must provide some extra
600 information on each call about the current line."""
612 information on each call about the current line."""
601
613
602 self.magic_escape = ESC_MAGIC
614 self.magic_escape = ESC_MAGIC
603 self.splitter = CompletionSplitter()
615 self.splitter = CompletionSplitter()
604
616
605 # Readline configuration, only used by the rlcompleter method.
617 # Readline configuration, only used by the rlcompleter method.
606 if use_readline:
618 if use_readline:
607 # We store the right version of readline so that later code
619 # We store the right version of readline so that later code
608 import IPython.utils.rlineimpl as readline
620 import IPython.utils.rlineimpl as readline
609 self.readline = readline
621 self.readline = readline
610 else:
622 else:
611 self.readline = None
623 self.readline = None
612
624
613 # _greedy_changed() depends on splitter and readline being defined:
625 # _greedy_changed() depends on splitter and readline being defined:
614 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
626 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
615 config=config, **kwargs)
627 config=config, **kwargs)
616
628
617 # List where completion matches will be stored
629 # List where completion matches will be stored
618 self.matches = []
630 self.matches = []
619 self.shell = shell
631 self.shell = shell
620 # Regexp to split filenames with spaces in them
632 # Regexp to split filenames with spaces in them
621 self.space_name_re = re.compile(r'([^\\] )')
633 self.space_name_re = re.compile(r'([^\\] )')
622 # Hold a local ref. to glob.glob for speed
634 # Hold a local ref. to glob.glob for speed
623 self.glob = glob.glob
635 self.glob = glob.glob
624
636
625 # Determine if we are running on 'dumb' terminals, like (X)Emacs
637 # Determine if we are running on 'dumb' terminals, like (X)Emacs
626 # buffers, to avoid completion problems.
638 # buffers, to avoid completion problems.
627 term = os.environ.get('TERM','xterm')
639 term = os.environ.get('TERM','xterm')
628 self.dumb_terminal = term in ['dumb','emacs']
640 self.dumb_terminal = term in ['dumb','emacs']
629
641
630 # Special handling of backslashes needed in win32 platforms
642 # Special handling of backslashes needed in win32 platforms
631 if sys.platform == "win32":
643 if sys.platform == "win32":
632 self.clean_glob = self._clean_glob_win32
644 self.clean_glob = self._clean_glob_win32
633 else:
645 else:
634 self.clean_glob = self._clean_glob
646 self.clean_glob = self._clean_glob
635
647
636 #regexp to parse docstring for function signature
648 #regexp to parse docstring for function signature
637 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
649 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
638 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
650 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
639 #use this if positional argument name is also needed
651 #use this if positional argument name is also needed
640 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
652 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
641
653
642 # All active matcher routines for completion
654 # All active matcher routines for completion
643 self.matchers = [self.python_matches,
655 self.matchers = [self.python_matches,
644 self.file_matches,
656 self.file_matches,
645 self.magic_matches,
657 self.magic_matches,
646 self.python_func_kw_matches,
658 self.python_func_kw_matches,
647 self.dict_key_matches,
659 self.dict_key_matches,
648 ]
660 ]
649
661
650 def all_completions(self, text):
662 def all_completions(self, text):
651 """
663 """
652 Wrapper around the complete method for the benefit of emacs
664 Wrapper around the complete method for the benefit of emacs
653 and pydb.
665 and pydb.
654 """
666 """
655 return self.complete(text)[1]
667 return self.complete(text)[1]
656
668
657 def _clean_glob(self,text):
669 def _clean_glob(self,text):
658 return self.glob("%s*" % text)
670 return self.glob("%s*" % text)
659
671
660 def _clean_glob_win32(self,text):
672 def _clean_glob_win32(self,text):
661 return [f.replace("\\","/")
673 return [f.replace("\\","/")
662 for f in self.glob("%s*" % text)]
674 for f in self.glob("%s*" % text)]
663
675
664 def file_matches(self, text):
676 def file_matches(self, text):
665 """Match filenames, expanding ~USER type strings.
677 """Match filenames, expanding ~USER type strings.
666
678
667 Most of the seemingly convoluted logic in this completer is an
679 Most of the seemingly convoluted logic in this completer is an
668 attempt to handle filenames with spaces in them. And yet it's not
680 attempt to handle filenames with spaces in them. And yet it's not
669 quite perfect, because Python's readline doesn't expose all of the
681 quite perfect, because Python's readline doesn't expose all of the
670 GNU readline details needed for this to be done correctly.
682 GNU readline details needed for this to be done correctly.
671
683
672 For a filename with a space in it, the printed completions will be
684 For a filename with a space in it, the printed completions will be
673 only the parts after what's already been typed (instead of the
685 only the parts after what's already been typed (instead of the
674 full completions, as is normally done). I don't think with the
686 full completions, as is normally done). I don't think with the
675 current (as of Python 2.3) Python readline it's possible to do
687 current (as of Python 2.3) Python readline it's possible to do
676 better."""
688 better."""
677
689
678 #io.rprint('Completer->file_matches: <%r>' % text) # dbg
690 #io.rprint('Completer->file_matches: <%r>' % text) # dbg
679
691
680 # chars that require escaping with backslash - i.e. chars
692 # chars that require escaping with backslash - i.e. chars
681 # that readline treats incorrectly as delimiters, but we
693 # that readline treats incorrectly as delimiters, but we
682 # don't want to treat as delimiters in filename matching
694 # don't want to treat as delimiters in filename matching
683 # when escaped with backslash
695 # when escaped with backslash
684 if text.startswith('!'):
696 if text.startswith('!'):
685 text = text[1:]
697 text = text[1:]
686 text_prefix = '!'
698 text_prefix = '!'
687 else:
699 else:
688 text_prefix = ''
700 text_prefix = ''
689
701
690 text_until_cursor = self.text_until_cursor
702 text_until_cursor = self.text_until_cursor
691 # track strings with open quotes
703 # track strings with open quotes
692 open_quotes = has_open_quotes(text_until_cursor)
704 open_quotes = has_open_quotes(text_until_cursor)
693
705
694 if '(' in text_until_cursor or '[' in text_until_cursor:
706 if '(' in text_until_cursor or '[' in text_until_cursor:
695 lsplit = text
707 lsplit = text
696 else:
708 else:
697 try:
709 try:
698 # arg_split ~ shlex.split, but with unicode bugs fixed by us
710 # arg_split ~ shlex.split, but with unicode bugs fixed by us
699 lsplit = arg_split(text_until_cursor)[-1]
711 lsplit = arg_split(text_until_cursor)[-1]
700 except ValueError:
712 except ValueError:
701 # typically an unmatched ", or backslash without escaped char.
713 # typically an unmatched ", or backslash without escaped char.
702 if open_quotes:
714 if open_quotes:
703 lsplit = text_until_cursor.split(open_quotes)[-1]
715 lsplit = text_until_cursor.split(open_quotes)[-1]
704 else:
716 else:
705 return []
717 return []
706 except IndexError:
718 except IndexError:
707 # tab pressed on empty line
719 # tab pressed on empty line
708 lsplit = ""
720 lsplit = ""
709
721
710 if not open_quotes and lsplit != protect_filename(lsplit):
722 if not open_quotes and lsplit != protect_filename(lsplit):
711 # if protectables are found, do matching on the whole escaped name
723 # if protectables are found, do matching on the whole escaped name
712 has_protectables = True
724 has_protectables = True
713 text0,text = text,lsplit
725 text0,text = text,lsplit
714 else:
726 else:
715 has_protectables = False
727 has_protectables = False
716 text = os.path.expanduser(text)
728 text = os.path.expanduser(text)
717
729
718 if text == "":
730 if text == "":
719 return [text_prefix + protect_filename(f) for f in self.glob("*")]
731 return [text_prefix + protect_filename(f) for f in self.glob("*")]
720
732
721 # Compute the matches from the filesystem
733 # Compute the matches from the filesystem
722 m0 = self.clean_glob(text.replace('\\',''))
734 m0 = self.clean_glob(text.replace('\\',''))
723
735
724 if has_protectables:
736 if has_protectables:
725 # If we had protectables, we need to revert our changes to the
737 # If we had protectables, we need to revert our changes to the
726 # beginning of filename so that we don't double-write the part
738 # beginning of filename so that we don't double-write the part
727 # of the filename we have so far
739 # of the filename we have so far
728 len_lsplit = len(lsplit)
740 len_lsplit = len(lsplit)
729 matches = [text_prefix + text0 +
741 matches = [text_prefix + text0 +
730 protect_filename(f[len_lsplit:]) for f in m0]
742 protect_filename(f[len_lsplit:]) for f in m0]
731 else:
743 else:
732 if open_quotes:
744 if open_quotes:
733 # if we have a string with an open quote, we don't need to
745 # if we have a string with an open quote, we don't need to
734 # protect the names at all (and we _shouldn't_, as it
746 # protect the names at all (and we _shouldn't_, as it
735 # would cause bugs when the filesystem call is made).
747 # would cause bugs when the filesystem call is made).
736 matches = m0
748 matches = m0
737 else:
749 else:
738 matches = [text_prefix +
750 matches = [text_prefix +
739 protect_filename(f) for f in m0]
751 protect_filename(f) for f in m0]
740
752
741 #io.rprint('mm', matches) # dbg
753 #io.rprint('mm', matches) # dbg
742
754
743 # Mark directories in input list by appending '/' to their names.
755 # Mark directories in input list by appending '/' to their names.
744 matches = [x+'/' if os.path.isdir(x) else x for x in matches]
756 matches = [x+'/' if os.path.isdir(x) else x for x in matches]
745 return matches
757 return matches
746
758
747 def magic_matches(self, text):
759 def magic_matches(self, text):
748 """Match magics"""
760 """Match magics"""
749 #print 'Completer->magic_matches:',text,'lb',self.text_until_cursor # dbg
761 #print 'Completer->magic_matches:',text,'lb',self.text_until_cursor # dbg
750 # Get all shell magics now rather than statically, so magics loaded at
762 # Get all shell magics now rather than statically, so magics loaded at
751 # runtime show up too.
763 # runtime show up too.
752 lsm = self.shell.magics_manager.lsmagic()
764 lsm = self.shell.magics_manager.lsmagic()
753 line_magics = lsm['line']
765 line_magics = lsm['line']
754 cell_magics = lsm['cell']
766 cell_magics = lsm['cell']
755 pre = self.magic_escape
767 pre = self.magic_escape
756 pre2 = pre+pre
768 pre2 = pre+pre
757
769
758 # Completion logic:
770 # Completion logic:
759 # - user gives %%: only do cell magics
771 # - user gives %%: only do cell magics
760 # - user gives %: do both line and cell magics
772 # - user gives %: do both line and cell magics
761 # - no prefix: do both
773 # - no prefix: do both
762 # In other words, line magics are skipped if the user gives %% explicitly
774 # In other words, line magics are skipped if the user gives %% explicitly
763 bare_text = text.lstrip(pre)
775 bare_text = text.lstrip(pre)
764 comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
776 comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
765 if not text.startswith(pre2):
777 if not text.startswith(pre2):
766 comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
778 comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
767 return comp
779 return comp
768
780
769 def python_matches(self,text):
781 def python_matches(self,text):
770 """Match attributes or global python names"""
782 """Match attributes or global python names"""
771
783
772 #io.rprint('Completer->python_matches, txt=%r' % text) # dbg
784 #io.rprint('Completer->python_matches, txt=%r' % text) # dbg
773 if "." in text:
785 if "." in text:
774 try:
786 try:
775 matches = self.attr_matches(text)
787 matches = self.attr_matches(text)
776 if text.endswith('.') and self.omit__names:
788 if text.endswith('.') and self.omit__names:
777 if self.omit__names == 1:
789 if self.omit__names == 1:
778 # true if txt is _not_ a __ name, false otherwise:
790 # true if txt is _not_ a __ name, false otherwise:
779 no__name = (lambda txt:
791 no__name = (lambda txt:
780 re.match(r'.*\.__.*?__',txt) is None)
792 re.match(r'.*\.__.*?__',txt) is None)
781 else:
793 else:
782 # true if txt is _not_ a _ name, false otherwise:
794 # true if txt is _not_ a _ name, false otherwise:
783 no__name = (lambda txt:
795 no__name = (lambda txt:
784 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
796 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
785 matches = filter(no__name, matches)
797 matches = filter(no__name, matches)
786 except NameError:
798 except NameError:
787 # catches <undefined attributes>.<tab>
799 # catches <undefined attributes>.<tab>
788 matches = []
800 matches = []
789 else:
801 else:
790 matches = self.global_matches(text)
802 matches = self.global_matches(text)
791
803
792 return matches
804 return matches
793
805
794 def _default_arguments_from_docstring(self, doc):
806 def _default_arguments_from_docstring(self, doc):
795 """Parse the first line of docstring for call signature.
807 """Parse the first line of docstring for call signature.
796
808
797 Docstring should be of the form 'min(iterable[, key=func])\n'.
809 Docstring should be of the form 'min(iterable[, key=func])\n'.
798 It can also parse Cython docstrings of the form
810 It can also parse Cython docstrings of the form
799 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
811 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
800 """
812 """
801 if doc is None:
813 if doc is None:
802 return []
814 return []
803
815
804 #care only the firstline
816 #care only the firstline
805 line = doc.lstrip().splitlines()[0]
817 line = doc.lstrip().splitlines()[0]
806
818
807 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
819 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
808 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
820 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
809 sig = self.docstring_sig_re.search(line)
821 sig = self.docstring_sig_re.search(line)
810 if sig is None:
822 if sig is None:
811 return []
823 return []
812 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
824 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
813 sig = sig.groups()[0].split(',')
825 sig = sig.groups()[0].split(',')
814 ret = []
826 ret = []
815 for s in sig:
827 for s in sig:
816 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
828 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
817 ret += self.docstring_kwd_re.findall(s)
829 ret += self.docstring_kwd_re.findall(s)
818 return ret
830 return ret
819
831
820 def _default_arguments(self, obj):
832 def _default_arguments(self, obj):
821 """Return the list of default arguments of obj if it is callable,
833 """Return the list of default arguments of obj if it is callable,
822 or empty list otherwise."""
834 or empty list otherwise."""
823 call_obj = obj
835 call_obj = obj
824 ret = []
836 ret = []
825 if inspect.isbuiltin(obj):
837 if inspect.isbuiltin(obj):
826 pass
838 pass
827 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
839 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
828 if inspect.isclass(obj):
840 if inspect.isclass(obj):
829 #for cython embedsignature=True the constructor docstring
841 #for cython embedsignature=True the constructor docstring
830 #belongs to the object itself not __init__
842 #belongs to the object itself not __init__
831 ret += self._default_arguments_from_docstring(
843 ret += self._default_arguments_from_docstring(
832 getattr(obj, '__doc__', ''))
844 getattr(obj, '__doc__', ''))
833 # for classes, check for __init__,__new__
845 # for classes, check for __init__,__new__
834 call_obj = (getattr(obj, '__init__', None) or
846 call_obj = (getattr(obj, '__init__', None) or
835 getattr(obj, '__new__', None))
847 getattr(obj, '__new__', None))
836 # for all others, check if they are __call__able
848 # for all others, check if they are __call__able
837 elif hasattr(obj, '__call__'):
849 elif hasattr(obj, '__call__'):
838 call_obj = obj.__call__
850 call_obj = obj.__call__
839 ret += self._default_arguments_from_docstring(
851 ret += self._default_arguments_from_docstring(
840 getattr(call_obj, '__doc__', ''))
852 getattr(call_obj, '__doc__', ''))
841
853
842 if PY3:
854 if PY3:
843 _keeps = (inspect.Parameter.KEYWORD_ONLY,
855 _keeps = (inspect.Parameter.KEYWORD_ONLY,
844 inspect.Parameter.POSITIONAL_OR_KEYWORD)
856 inspect.Parameter.POSITIONAL_OR_KEYWORD)
845 signature = inspect.signature
857 signature = inspect.signature
846 else:
858 else:
847 import IPython.utils.signatures
859 import IPython.utils.signatures
848 _keeps = (IPython.utils.signatures.Parameter.KEYWORD_ONLY,
860 _keeps = (IPython.utils.signatures.Parameter.KEYWORD_ONLY,
849 IPython.utils.signatures.Parameter.POSITIONAL_OR_KEYWORD)
861 IPython.utils.signatures.Parameter.POSITIONAL_OR_KEYWORD)
850 signature = IPython.utils.signatures.signature
862 signature = IPython.utils.signatures.signature
851
863
852 try:
864 try:
853 sig = signature(call_obj)
865 sig = signature(call_obj)
854 ret.extend(k for k, v in sig.parameters.items() if
866 ret.extend(k for k, v in sig.parameters.items() if
855 v.kind in _keeps)
867 v.kind in _keeps)
856 except ValueError:
868 except ValueError:
857 pass
869 pass
858
870
859 return list(set(ret))
871 return list(set(ret))
860
872
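A rough sketch of how these two helpers extract keyword-argument names (an IPCompleter can be built without a shell for this; illustrative only):

    from IPython.core.completer import IPCompleter

    comp = IPCompleter(shell=None, use_readline=False)

    print(comp._default_arguments_from_docstring('min(iterable[, key=func])\n'))
    # ['key']  -- only keyword-style arguments are picked up by the docstring regex

    def fit(data, alpha=0.1, beta=2):
        pass

    print(comp._default_arguments(fit))
    # ['data', 'alpha', 'beta'] in some order (a set is used internally)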
861 def python_func_kw_matches(self,text):
873 def python_func_kw_matches(self,text):
862 """Match named parameters (kwargs) of the last open function"""
874 """Match named parameters (kwargs) of the last open function"""
863
875
864 if "." in text: # a parameter cannot be dotted
876 if "." in text: # a parameter cannot be dotted
865 return []
877 return []
866 try: regexp = self.__funcParamsRegex
878 try: regexp = self.__funcParamsRegex
867 except AttributeError:
879 except AttributeError:
868 regexp = self.__funcParamsRegex = re.compile(r'''
880 regexp = self.__funcParamsRegex = re.compile(r'''
869 '.*?(?<!\\)' | # single quoted strings or
881 '.*?(?<!\\)' | # single quoted strings or
870 ".*?(?<!\\)" | # double quoted strings or
882 ".*?(?<!\\)" | # double quoted strings or
871 \w+ | # identifier
883 \w+ | # identifier
872 \S # other characters
884 \S # other characters
873 ''', re.VERBOSE | re.DOTALL)
885 ''', re.VERBOSE | re.DOTALL)
874 # 1. find the nearest identifier that comes before an unclosed
886 # 1. find the nearest identifier that comes before an unclosed
875 # parenthesis before the cursor
887 # parenthesis before the cursor
876 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
888 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
877 tokens = regexp.findall(self.text_until_cursor)
889 tokens = regexp.findall(self.text_until_cursor)
878 tokens.reverse()
890 tokens.reverse()
879 iterTokens = iter(tokens); openPar = 0
891 iterTokens = iter(tokens); openPar = 0
880
892
881 for token in iterTokens:
893 for token in iterTokens:
882 if token == ')':
894 if token == ')':
883 openPar -= 1
895 openPar -= 1
884 elif token == '(':
896 elif token == '(':
885 openPar += 1
897 openPar += 1
886 if openPar > 0:
898 if openPar > 0:
887 # found the last unclosed parenthesis
899 # found the last unclosed parenthesis
888 break
900 break
889 else:
901 else:
890 return []
902 return []
891 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
903 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
892 ids = []
904 ids = []
893 isId = re.compile(r'\w+$').match
905 isId = re.compile(r'\w+$').match
894
906
895 while True:
907 while True:
896 try:
908 try:
897 ids.append(next(iterTokens))
909 ids.append(next(iterTokens))
898 if not isId(ids[-1]):
910 if not isId(ids[-1]):
899 ids.pop(); break
911 ids.pop(); break
900 if not next(iterTokens) == '.':
912 if not next(iterTokens) == '.':
901 break
913 break
902 except StopIteration:
914 except StopIteration:
903 break
915 break
904 # lookup the candidate callable matches either using global_matches
916 # lookup the candidate callable matches either using global_matches
905 # or attr_matches for dotted names
917 # or attr_matches for dotted names
906 if len(ids) == 1:
918 if len(ids) == 1:
907 callableMatches = self.global_matches(ids[0])
919 callableMatches = self.global_matches(ids[0])
908 else:
920 else:
909 callableMatches = self.attr_matches('.'.join(ids[::-1]))
921 callableMatches = self.attr_matches('.'.join(ids[::-1]))
910 argMatches = []
922 argMatches = []
911 for callableMatch in callableMatches:
923 for callableMatch in callableMatches:
912 try:
924 try:
913 namedArgs = self._default_arguments(eval(callableMatch,
925 namedArgs = self._default_arguments(eval(callableMatch,
914 self.namespace))
926 self.namespace))
915 except:
927 except:
916 continue
928 continue
917
929
918 for namedArg in namedArgs:
930 for namedArg in namedArgs:
919 if namedArg.startswith(text):
931 if namedArg.startswith(text):
920 argMatches.append("%s=" %namedArg)
932 argMatches.append("%s=" %namedArg)
921 return argMatches
933 return argMatches
922
934
923 def dict_key_matches(self, text):
935 def dict_key_matches(self, text):
924 "Match string keys in a dictionary, after e.g. 'foo[' "
936 "Match string keys in a dictionary, after e.g. 'foo[' "
925 def get_keys(obj):
937 def get_keys(obj):
926 # Only allow completion for known in-memory dict-like types
938 # Objects can define their own completions by defining an
939 # _ipy_key_completions_() method.
940 if _safe_really_hasattr(obj, '_ipy_key_completions_'):
941 return obj._ipy_key_completions_()
942
943 # Special case some common in-memory dict-like types
927 if isinstance(obj, dict) or\
944 if isinstance(obj, dict) or\
928 _safe_isinstance(obj, 'pandas', 'DataFrame'):
945 _safe_isinstance(obj, 'pandas', 'DataFrame'):
929 try:
946 try:
930 return list(obj.keys())
947 return list(obj.keys())
931 except Exception:
948 except Exception:
932 return []
949 return []
933 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
950 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
934 _safe_isinstance(obj, 'numpy', 'void'):
951 _safe_isinstance(obj, 'numpy', 'void'):
935 return obj.dtype.names or []
952 return obj.dtype.names or []
936 return []
953 return []
937
954
938 try:
955 try:
939 regexps = self.__dict_key_regexps
956 regexps = self.__dict_key_regexps
940 except AttributeError:
957 except AttributeError:
941 dict_key_re_fmt = r'''(?x)
958 dict_key_re_fmt = r'''(?x)
942 ( # match dict-referring expression wrt greedy setting
959 ( # match dict-referring expression wrt greedy setting
943 %s
960 %s
944 )
961 )
945 \[ # open bracket
962 \[ # open bracket
946 \s* # and optional whitespace
963 \s* # and optional whitespace
947 ([uUbB]? # string prefix (r not handled)
964 ([uUbB]? # string prefix (r not handled)
948 (?: # unclosed string
965 (?: # unclosed string
949 '(?:[^']|(?<!\\)\\')*
966 '(?:[^']|(?<!\\)\\')*
950 |
967 |
951 "(?:[^"]|(?<!\\)\\")*
968 "(?:[^"]|(?<!\\)\\")*
952 )
969 )
953 )?
970 )?
954 $
971 $
955 '''
972 '''
956 regexps = self.__dict_key_regexps = {
973 regexps = self.__dict_key_regexps = {
957 False: re.compile(dict_key_re_fmt % '''
974 False: re.compile(dict_key_re_fmt % '''
958 # identifiers separated by .
975 # identifiers separated by .
959 (?!\d)\w+
976 (?!\d)\w+
960 (?:\.(?!\d)\w+)*
977 (?:\.(?!\d)\w+)*
961 '''),
978 '''),
962 True: re.compile(dict_key_re_fmt % '''
979 True: re.compile(dict_key_re_fmt % '''
963 .+
980 .+
964 ''')
981 ''')
965 }
982 }
966
983
967 match = regexps[self.greedy].search(self.text_until_cursor)
984 match = regexps[self.greedy].search(self.text_until_cursor)
968 if match is None:
985 if match is None:
969 return []
986 return []
970
987
971 expr, prefix = match.groups()
988 expr, prefix = match.groups()
972 try:
989 try:
973 obj = eval(expr, self.namespace)
990 obj = eval(expr, self.namespace)
974 except Exception:
991 except Exception:
975 try:
992 try:
976 obj = eval(expr, self.global_namespace)
993 obj = eval(expr, self.global_namespace)
977 except Exception:
994 except Exception:
978 return []
995 return []
979
996
980 keys = get_keys(obj)
997 keys = get_keys(obj)
981 if not keys:
998 if not keys:
982 return keys
999 return keys
983 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
1000 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
984 if not matches:
1001 if not matches:
985 return matches
1002 return matches
986
1003
987 # get the cursor position of
1004 # get the cursor position of
988 # - the text being completed
1005 # - the text being completed
989 # - the start of the key text
1006 # - the start of the key text
990 # - the start of the completion
1007 # - the start of the completion
991 text_start = len(self.text_until_cursor) - len(text)
1008 text_start = len(self.text_until_cursor) - len(text)
992 if prefix:
1009 if prefix:
993 key_start = match.start(2)
1010 key_start = match.start(2)
994 completion_start = key_start + token_offset
1011 completion_start = key_start + token_offset
995 else:
1012 else:
996 key_start = completion_start = match.end()
1013 key_start = completion_start = match.end()
997
1014
998 # grab the leading prefix, to make sure all completions start with `text`
1015 # grab the leading prefix, to make sure all completions start with `text`
999 if text_start > key_start:
1016 if text_start > key_start:
1000 leading = ''
1017 leading = ''
1001 else:
1018 else:
1002 leading = text[text_start:completion_start]
1019 leading = text[text_start:completion_start]
1003
1020
1004 # the index of the `[` character
1021 # the index of the `[` character
1005 bracket_idx = match.end(1)
1022 bracket_idx = match.end(1)
1006
1023
1007 # append closing quote and bracket as appropriate
1024 # append closing quote and bracket as appropriate
1008 # this is *not* appropriate if the opening quote or bracket is outside
1025 # this is *not* appropriate if the opening quote or bracket is outside
1009 # the text given to this method
1026 # the text given to this method
1010 suf = ''
1027 suf = ''
1011 continuation = self.line_buffer[len(self.text_until_cursor):]
1028 continuation = self.line_buffer[len(self.text_until_cursor):]
1012 if key_start > text_start and closing_quote:
1029 if key_start > text_start and closing_quote:
1013 # quotes were opened inside text, maybe close them
1030 # quotes were opened inside text, maybe close them
1014 if continuation.startswith(closing_quote):
1031 if continuation.startswith(closing_quote):
1015 continuation = continuation[len(closing_quote):]
1032 continuation = continuation[len(closing_quote):]
1016 else:
1033 else:
1017 suf += closing_quote
1034 suf += closing_quote
1018 if bracket_idx > text_start:
1035 if bracket_idx > text_start:
1019 # brackets were opened inside text, maybe close them
1036 # brackets were opened inside text, maybe close them
1020 if not continuation.startswith(']'):
1037 if not continuation.startswith(']'):
1021 suf += ']'
1038 suf += ']'
1022
1039
1023 return [leading + k + suf for k in matches]
1040 return [leading + k + suf for k in matches]
1024
1041
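# --- Illustrative sketch, not part of this diff: an object opting into the
# --- _ipy_key_completions_ hook that get_keys() above checks first. The
# --- class name and keys below are made up for illustration only.

class LazyStore(object):
    """Mapping-like object whose keys are only known to itself."""
    def __init__(self, keys):
        self._keys = list(keys)

    def __getitem__(self, key):
        return None  # real lookup elided

    def _ipy_key_completions_(self):
        # strings returned here are offered after e.g. ``store['``
        return self._keys

# With  store = LazyStore(['alpha', 'beta'])  in the user namespace,
# completing  store['a  would be expected to offer 'alpha'.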
1025 def unicode_name_matches(self, text):
1042 def unicode_name_matches(self, text):
1026 u"""Match Latex-like syntax for unicode characters base
1043 u"""Match Latex-like syntax for unicode characters base
1027 on the name of the character.
1044 on the name of the character.
1028
1045
1029 This does \\GREEK SMALL LETTER ETA -> η
1046 This does \\GREEK SMALL LETTER ETA -> η
1030
1047
1031 Works only on valid Python 3 identifiers, or on combining characters that
1048 Works only on valid Python 3 identifiers, or on combining characters that
1032 will combine to form a valid identifier.
1049 will combine to form a valid identifier.
1033
1050
1034 Used on Python 3 only.
1051 Used on Python 3 only.
1035 """
1052 """
1036 slashpos = text.rfind('\\')
1053 slashpos = text.rfind('\\')
1037 if slashpos > -1:
1054 if slashpos > -1:
1038 s = text[slashpos+1:]
1055 s = text[slashpos+1:]
1039 try :
1056 try :
1040 unic = unicodedata.lookup(s)
1057 unic = unicodedata.lookup(s)
1041 # allow combining chars
1058 # allow combining chars
1042 if ('a'+unic).isidentifier():
1059 if ('a'+unic).isidentifier():
1043 return '\\'+s,[unic]
1060 return '\\'+s,[unic]
1044 except KeyError as e:
1061 except KeyError as e:
1045 pass
1062 pass
1046 return u'', []
1063 return u'', []
1047
1064
1048
1065
1049
1066
1050
1067
1051 def latex_matches(self, text):
1068 def latex_matches(self, text):
1052 u"""Match Latex syntax for unicode characters.
1069 u"""Match Latex syntax for unicode characters.
1053
1070
1054 This does both \\alp -> \\alpha and \\alpha -> α
1071 This does both \\alp -> \\alpha and \\alpha -> α
1055
1072
1056 Used on Python 3 only.
1073 Used on Python 3 only.
1057 """
1074 """
1058 slashpos = text.rfind('\\')
1075 slashpos = text.rfind('\\')
1059 if slashpos > -1:
1076 if slashpos > -1:
1060 s = text[slashpos:]
1077 s = text[slashpos:]
1061 if s in latex_symbols:
1078 if s in latex_symbols:
1062 # Try to complete a full latex symbol to unicode
1079 # Try to complete a full latex symbol to unicode
1063 # \\alpha -> α
1080 # \\alpha -> α
1064 return s, [latex_symbols[s]]
1081 return s, [latex_symbols[s]]
1065 else:
1082 else:
1066 # If a user has partially typed a latex symbol, give them
1083 # If a user has partially typed a latex symbol, give them
1067 # a full list of options \al -> [\aleph, \alpha]
1084 # a full list of options \al -> [\aleph, \alpha]
1068 matches = [k for k in latex_symbols if k.startswith(s)]
1085 matches = [k for k in latex_symbols if k.startswith(s)]
1069 return s, matches
1086 return s, matches
1070 return u'', []
1087 return u'', []
1071
1088
1072 def dispatch_custom_completer(self, text):
1089 def dispatch_custom_completer(self, text):
1073 #io.rprint("Custom! '%s' %s" % (text, self.custom_completers)) # dbg
1090 #io.rprint("Custom! '%s' %s" % (text, self.custom_completers)) # dbg
1074 line = self.line_buffer
1091 line = self.line_buffer
1075 if not line.strip():
1092 if not line.strip():
1076 return None
1093 return None
1077
1094
1078 # Create a little structure to pass all the relevant information about
1095 # Create a little structure to pass all the relevant information about
1079 # the current completion to any custom completer.
1096 # the current completion to any custom completer.
1080 event = Bunch()
1097 event = Bunch()
1081 event.line = line
1098 event.line = line
1082 event.symbol = text
1099 event.symbol = text
1083 cmd = line.split(None,1)[0]
1100 cmd = line.split(None,1)[0]
1084 event.command = cmd
1101 event.command = cmd
1085 event.text_until_cursor = self.text_until_cursor
1102 event.text_until_cursor = self.text_until_cursor
1086
1103
1087 #print "\ncustom:{%s]\n" % event # dbg
1104 #print "\ncustom:{%s]\n" % event # dbg
1088
1105
1089 # for foo etc, try also to find completer for %foo
1106 # for foo etc, try also to find completer for %foo
1090 if not cmd.startswith(self.magic_escape):
1107 if not cmd.startswith(self.magic_escape):
1091 try_magic = self.custom_completers.s_matches(
1108 try_magic = self.custom_completers.s_matches(
1092 self.magic_escape + cmd)
1109 self.magic_escape + cmd)
1093 else:
1110 else:
1094 try_magic = []
1111 try_magic = []
1095
1112
1096 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1113 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1097 try_magic,
1114 try_magic,
1098 self.custom_completers.flat_matches(self.text_until_cursor)):
1115 self.custom_completers.flat_matches(self.text_until_cursor)):
1099 #print "try",c # dbg
1116 #print "try",c # dbg
1100 try:
1117 try:
1101 res = c(event)
1118 res = c(event)
1102 if res:
1119 if res:
1103 # first, try case sensitive match
1120 # first, try case sensitive match
1104 withcase = [r for r in res if r.startswith(text)]
1121 withcase = [r for r in res if r.startswith(text)]
1105 if withcase:
1122 if withcase:
1106 return withcase
1123 return withcase
1107 # if none, then case insensitive ones are ok too
1124 # if none, then case insensitive ones are ok too
1108 text_low = text.lower()
1125 text_low = text.lower()
1109 return [r for r in res if r.lower().startswith(text_low)]
1126 return [r for r in res if r.lower().startswith(text_low)]
1110 except TryNext:
1127 except TryNext:
1111 pass
1128 pass
1112
1129
1113 return None
1130 return None
1114
1131
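# --- Illustrative sketch, not part of this diff: the shape of a custom
# --- command completer consumed by dispatch_custom_completer() above. The
# --- command name and registration call below are assumptions, shown only
# --- to illustrate the event fields (line, symbol, command).

def apt_completer(self, event):
    """Offer candidate words for lines starting with 'apt'."""
    # event.line is the full line, event.symbol the fragment being completed
    return ['update', 'upgrade', 'install', 'remove']

# Registration typically goes through the shell's hook system, e.g.
#   get_ipython().set_hook('complete_command', apt_completer, str_key='apt')
# A completer may raise TryNext to pass the event on to other completers.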
1115 def complete(self, text=None, line_buffer=None, cursor_pos=None):
1132 def complete(self, text=None, line_buffer=None, cursor_pos=None):
1116 """Find completions for the given text and line context.
1133 """Find completions for the given text and line context.
1117
1134
1118 Note that both the text and the line_buffer are optional, but at least
1135 Note that both the text and the line_buffer are optional, but at least
1119 one of them must be given.
1136 one of them must be given.
1120
1137
1121 Parameters
1138 Parameters
1122 ----------
1139 ----------
1123 text : string, optional
1140 text : string, optional
1124 Text to perform the completion on. If not given, the line buffer
1141 Text to perform the completion on. If not given, the line buffer
1125 is split using the instance's CompletionSplitter object.
1142 is split using the instance's CompletionSplitter object.
1126
1143
1127 line_buffer : string, optional
1144 line_buffer : string, optional
1128 If not given, the completer attempts to obtain the current line
1145 If not given, the completer attempts to obtain the current line
1129 buffer via readline. This keyword allows clients that are
1146 buffer via readline. This keyword allows clients that are
1130 requesting text completions in non-readline contexts to inform
1147 requesting text completions in non-readline contexts to inform
1131 the completer of the entire text.
1148 the completer of the entire text.
1132
1149
1133 cursor_pos : int, optional
1150 cursor_pos : int, optional
1134 Index of the cursor in the full line buffer. Should be provided by
1151 Index of the cursor in the full line buffer. Should be provided by
1135 remote frontends where the kernel has no access to frontend state.
1152 remote frontends where the kernel has no access to frontend state.
1136
1153
1137 Returns
1154 Returns
1138 -------
1155 -------
1139 text : str
1156 text : str
1140 Text that was actually used in the completion.
1157 Text that was actually used in the completion.
1141
1158
1142 matches : list
1159 matches : list
1143 A list of completion matches.
1160 A list of completion matches.
1144 """
1161 """
1145 # io.rprint('\nCOMP1 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1162 # io.rprint('\nCOMP1 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1146
1163
1147 # if the cursor position isn't given, the only sane assumption we can
1164 # if the cursor position isn't given, the only sane assumption we can
1148 # make is that it's at the end of the line (the common case)
1165 # make is that it's at the end of the line (the common case)
1149 if cursor_pos is None:
1166 if cursor_pos is None:
1150 cursor_pos = len(line_buffer) if text is None else len(text)
1167 cursor_pos = len(line_buffer) if text is None else len(text)
1151
1168
1152 if PY3:
1169 if PY3:
1153
1170
1154 base_text = text if not line_buffer else line_buffer[:cursor_pos]
1171 base_text = text if not line_buffer else line_buffer[:cursor_pos]
1155 latex_text, latex_matches = self.latex_matches(base_text)
1172 latex_text, latex_matches = self.latex_matches(base_text)
1156 if latex_matches:
1173 if latex_matches:
1157 return latex_text, latex_matches
1174 return latex_text, latex_matches
1158 name_text = ''
1175 name_text = ''
1159 name_matches = []
1176 name_matches = []
1160 for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
1177 for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
1161 name_text, name_matches = meth(base_text)
1178 name_text, name_matches = meth(base_text)
1162 if name_text:
1179 if name_text:
1163 return name_text, name_matches
1180 return name_text, name_matches
1164
1181
1165 # if text is either None or an empty string, rely on the line buffer
1182 # if text is either None or an empty string, rely on the line buffer
1166 if not text:
1183 if not text:
1167 text = self.splitter.split_line(line_buffer, cursor_pos)
1184 text = self.splitter.split_line(line_buffer, cursor_pos)
1168
1185
1169 # If no line buffer is given, assume the input text is all there was
1186 # If no line buffer is given, assume the input text is all there was
1170 if line_buffer is None:
1187 if line_buffer is None:
1171 line_buffer = text
1188 line_buffer = text
1172
1189
1173 self.line_buffer = line_buffer
1190 self.line_buffer = line_buffer
1174 self.text_until_cursor = self.line_buffer[:cursor_pos]
1191 self.text_until_cursor = self.line_buffer[:cursor_pos]
1175 # io.rprint('COMP2 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1192 # io.rprint('COMP2 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1176
1193
1177 # Start with a clean slate of completions
1194 # Start with a clean slate of completions
1178 self.matches[:] = []
1195 self.matches[:] = []
1179 custom_res = self.dispatch_custom_completer(text)
1196 custom_res = self.dispatch_custom_completer(text)
1180 if custom_res is not None:
1197 if custom_res is not None:
1181 # did custom completers produce something?
1198 # did custom completers produce something?
1182 self.matches = custom_res
1199 self.matches = custom_res
1183 else:
1200 else:
1184 # Extend the list of completions with the results of each
1201 # Extend the list of completions with the results of each
1185 # matcher, so we return results to the user from all
1202 # matcher, so we return results to the user from all
1186 # namespaces.
1203 # namespaces.
1187 if self.merge_completions:
1204 if self.merge_completions:
1188 self.matches = []
1205 self.matches = []
1189 for matcher in self.matchers:
1206 for matcher in self.matchers:
1190 try:
1207 try:
1191 self.matches.extend(matcher(text))
1208 self.matches.extend(matcher(text))
1192 except:
1209 except:
1193 # Show the ugly traceback if the matcher causes an
1210 # Show the ugly traceback if the matcher causes an
1194 # exception, but do NOT crash the kernel!
1211 # exception, but do NOT crash the kernel!
1195 sys.excepthook(*sys.exc_info())
1212 sys.excepthook(*sys.exc_info())
1196 else:
1213 else:
1197 for matcher in self.matchers:
1214 for matcher in self.matchers:
1198 self.matches = matcher(text)
1215 self.matches = matcher(text)
1199 if self.matches:
1216 if self.matches:
1200 break
1217 break
1201 # FIXME: we should extend our api to return a dict with completions for
1218 # FIXME: we should extend our api to return a dict with completions for
1202 # different types of objects. The rlcomplete() method could then
1219 # different types of objects. The rlcomplete() method could then
1203 # simply collapse the dict into a list for readline, but we'd have
1220 # simply collapse the dict into a list for readline, but we'd have
1204 # richer completion semantics in other environments.
1221 # richer completion semantics in other environments.
1205
1222
1206 self.matches = sorted(set(self.matches), key=completions_sorting_key)
1223 self.matches = sorted(set(self.matches), key=completions_sorting_key)
1207
1224
1208 #io.rprint('COMP TEXT, MATCHES: %r, %r' % (text, self.matches)) # dbg
1225 #io.rprint('COMP TEXT, MATCHES: %r, %r' % (text, self.matches)) # dbg
1209 return text, self.matches
1226 return text, self.matches
1210
1227
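# --- Illustrative usage, not part of this diff: a frontend without readline
# --- driving complete() directly with the full line and cursor offset, as
# --- described in the docstring above. Assumes a running IPython shell.

from IPython import get_ipython

ip = get_ipython()
ip.user_ns['d'] = {'alpha': 1, 'beta': 2}

# cursor just after the opening quote, i.e. completing the dict keys of d
text, matches = ip.Completer.complete(line_buffer="d['", cursor_pos=3)
# matches would be expected to include 'alpha' and 'beta'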
1211 def rlcomplete(self, text, state):
1228 def rlcomplete(self, text, state):
1212 """Return the state-th possible completion for 'text'.
1229 """Return the state-th possible completion for 'text'.
1213
1230
1214 This is called successively with state == 0, 1, 2, ... until it
1231 This is called successively with state == 0, 1, 2, ... until it
1215 returns None. The completion should begin with 'text'.
1232 returns None. The completion should begin with 'text'.
1216
1233
1217 Parameters
1234 Parameters
1218 ----------
1235 ----------
1219 text : string
1236 text : string
1220 Text to perform the completion on.
1237 Text to perform the completion on.
1221
1238
1222 state : int
1239 state : int
1223 Counter used by readline.
1240 Counter used by readline.
1224 """
1241 """
1225 if state==0:
1242 if state==0:
1226
1243
1227 self.line_buffer = line_buffer = self.readline.get_line_buffer()
1244 self.line_buffer = line_buffer = self.readline.get_line_buffer()
1228 cursor_pos = self.readline.get_endidx()
1245 cursor_pos = self.readline.get_endidx()
1229
1246
1230 #io.rprint("\nRLCOMPLETE: %r %r %r" %
1247 #io.rprint("\nRLCOMPLETE: %r %r %r" %
1231 # (text, line_buffer, cursor_pos) ) # dbg
1248 # (text, line_buffer, cursor_pos) ) # dbg
1232
1249
1233 # if there is only a tab on a line with only whitespace, instead of
1250 # if there is only a tab on a line with only whitespace, instead of
1234 # the mostly useless 'do you want to see all million completions'
1251 # the mostly useless 'do you want to see all million completions'
1235 # message, just do the right thing and give the user their tab!
1252 # message, just do the right thing and give the user their tab!
1236 # Incidentally, this enables pasting of tabbed text from an editor
1253 # Incidentally, this enables pasting of tabbed text from an editor
1237 # (as long as autoindent is off).
1254 # (as long as autoindent is off).
1238
1255
1239 # It should be noted that at least pyreadline still shows file
1256 # It should be noted that at least pyreadline still shows file
1240 # completions - is there a way around it?
1257 # completions - is there a way around it?
1241
1258
1242 # don't apply this on 'dumb' terminals, such as emacs buffers, so
1259 # don't apply this on 'dumb' terminals, such as emacs buffers, so
1243 # we don't interfere with their own tab-completion mechanism.
1260 # we don't interfere with their own tab-completion mechanism.
1244 if not (self.dumb_terminal or line_buffer.strip()):
1261 if not (self.dumb_terminal or line_buffer.strip()):
1245 self.readline.insert_text('\t')
1262 self.readline.insert_text('\t')
1246 sys.stdout.flush()
1263 sys.stdout.flush()
1247 return None
1264 return None
1248
1265
1249 # Note: debugging exceptions that may occur in completion is very
1266 # Note: debugging exceptions that may occur in completion is very
1250 # tricky, because readline unconditionally silences them. So if
1267 # tricky, because readline unconditionally silences them. So if
1251 # during development you suspect a bug in the completion code, turn
1268 # during development you suspect a bug in the completion code, turn
1252 # this flag on temporarily by uncommenting the second form (don't
1269 # this flag on temporarily by uncommenting the second form (don't
1253 # flip the value in the first line, as the '# dbg' marker can be
1270 # flip the value in the first line, as the '# dbg' marker can be
1254 # automatically detected and is used elsewhere).
1271 # automatically detected and is used elsewhere).
1255 DEBUG = False
1272 DEBUG = False
1256 #DEBUG = True # dbg
1273 #DEBUG = True # dbg
1257 if DEBUG:
1274 if DEBUG:
1258 try:
1275 try:
1259 self.complete(text, line_buffer, cursor_pos)
1276 self.complete(text, line_buffer, cursor_pos)
1260 except:
1277 except:
1261 import traceback; traceback.print_exc()
1278 import traceback; traceback.print_exc()
1262 else:
1279 else:
1263 # The normal production version is here
1280 # The normal production version is here
1264
1281
1265 # This method computes the self.matches array
1282 # This method computes the self.matches array
1266 self.complete(text, line_buffer, cursor_pos)
1283 self.complete(text, line_buffer, cursor_pos)
1267
1284
1268 try:
1285 try:
1269 return self.matches[state]
1286 return self.matches[state]
1270 except IndexError:
1287 except IndexError:
1271 return None
1288 return None
1272
1289
@@ -1,780 +1,796 b''
1 # encoding: utf-8
1 # encoding: utf-8
2 """Tests for the IPython tab-completion machinery."""
2 """Tests for the IPython tab-completion machinery."""
3
3
4 # Copyright (c) IPython Development Team.
4 # Copyright (c) IPython Development Team.
5 # Distributed under the terms of the Modified BSD License.
5 # Distributed under the terms of the Modified BSD License.
6
6
7 import os
7 import os
8 import sys
8 import sys
9 import unittest
9 import unittest
10
10
11 from contextlib import contextmanager
11 from contextlib import contextmanager
12
12
13 import nose.tools as nt
13 import nose.tools as nt
14
14
15 from traitlets.config.loader import Config
15 from traitlets.config.loader import Config
16 from IPython import get_ipython
16 from IPython import get_ipython
17 from IPython.core import completer
17 from IPython.core import completer
18 from IPython.external.decorators import knownfailureif
18 from IPython.external.decorators import knownfailureif
19 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
19 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
20 from IPython.utils.generics import complete_object
20 from IPython.utils.generics import complete_object
21 from IPython.utils.py3compat import string_types, unicode_type
21 from IPython.utils.py3compat import string_types, unicode_type
22 from IPython.testing import decorators as dec
22 from IPython.testing import decorators as dec
23
23
24 #-----------------------------------------------------------------------------
24 #-----------------------------------------------------------------------------
25 # Test functions
25 # Test functions
26 #-----------------------------------------------------------------------------
26 #-----------------------------------------------------------------------------
27
27
28 @contextmanager
28 @contextmanager
29 def greedy_completion():
29 def greedy_completion():
30 ip = get_ipython()
30 ip = get_ipython()
31 greedy_original = ip.Completer.greedy
31 greedy_original = ip.Completer.greedy
32 try:
32 try:
33 ip.Completer.greedy = True
33 ip.Completer.greedy = True
34 yield
34 yield
35 finally:
35 finally:
36 ip.Completer.greedy = greedy_original
36 ip.Completer.greedy = greedy_original
37
37
38 def test_protect_filename():
38 def test_protect_filename():
39 pairs = [ ('abc','abc'),
39 pairs = [ ('abc','abc'),
40 (' abc',r'\ abc'),
40 (' abc',r'\ abc'),
41 ('a bc',r'a\ bc'),
41 ('a bc',r'a\ bc'),
42 ('a bc',r'a\ \ bc'),
42 ('a bc',r'a\ \ bc'),
43 (' bc',r'\ \ bc'),
43 (' bc',r'\ \ bc'),
44 ]
44 ]
45 # On posix, we also protect parens and other special characters
45 # On posix, we also protect parens and other special characters
46 if sys.platform != 'win32':
46 if sys.platform != 'win32':
47 pairs.extend( [('a(bc',r'a\(bc'),
47 pairs.extend( [('a(bc',r'a\(bc'),
48 ('a)bc',r'a\)bc'),
48 ('a)bc',r'a\)bc'),
49 ('a( )bc',r'a\(\ \)bc'),
49 ('a( )bc',r'a\(\ \)bc'),
50 ('a[1]bc', r'a\[1\]bc'),
50 ('a[1]bc', r'a\[1\]bc'),
51 ('a{1}bc', r'a\{1\}bc'),
51 ('a{1}bc', r'a\{1\}bc'),
52 ('a#bc', r'a\#bc'),
52 ('a#bc', r'a\#bc'),
53 ('a?bc', r'a\?bc'),
53 ('a?bc', r'a\?bc'),
54 ('a=bc', r'a\=bc'),
54 ('a=bc', r'a\=bc'),
55 ('a\\bc', r'a\\bc'),
55 ('a\\bc', r'a\\bc'),
56 ('a|bc', r'a\|bc'),
56 ('a|bc', r'a\|bc'),
57 ('a;bc', r'a\;bc'),
57 ('a;bc', r'a\;bc'),
58 ('a:bc', r'a\:bc'),
58 ('a:bc', r'a\:bc'),
59 ("a'bc", r"a\'bc"),
59 ("a'bc", r"a\'bc"),
60 ('a*bc', r'a\*bc'),
60 ('a*bc', r'a\*bc'),
61 ('a"bc', r'a\"bc'),
61 ('a"bc', r'a\"bc'),
62 ('a^bc', r'a\^bc'),
62 ('a^bc', r'a\^bc'),
63 ('a&bc', r'a\&bc'),
63 ('a&bc', r'a\&bc'),
64 ] )
64 ] )
65 # run the actual tests
65 # run the actual tests
66 for s1, s2 in pairs:
66 for s1, s2 in pairs:
67 s1p = completer.protect_filename(s1)
67 s1p = completer.protect_filename(s1)
68 nt.assert_equal(s1p, s2)
68 nt.assert_equal(s1p, s2)
69
69
70
70
71 def check_line_split(splitter, test_specs):
71 def check_line_split(splitter, test_specs):
72 for part1, part2, split in test_specs:
72 for part1, part2, split in test_specs:
73 cursor_pos = len(part1)
73 cursor_pos = len(part1)
74 line = part1+part2
74 line = part1+part2
75 out = splitter.split_line(line, cursor_pos)
75 out = splitter.split_line(line, cursor_pos)
76 nt.assert_equal(out, split)
76 nt.assert_equal(out, split)
77
77
78
78
79 def test_line_split():
79 def test_line_split():
80 """Basic line splitter test with default specs."""
80 """Basic line splitter test with default specs."""
81 sp = completer.CompletionSplitter()
81 sp = completer.CompletionSplitter()
82 # The format of the test specs is: part1, part2, expected answer. Parts 1
82 # The format of the test specs is: part1, part2, expected answer. Parts 1
83 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
83 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
84 # was at the end of part1. So an empty part2 represents someone hitting
84 # was at the end of part1. So an empty part2 represents someone hitting
85 # tab at the end of the line, the most common case.
85 # tab at the end of the line, the most common case.
86 t = [('run some/scrip', '', 'some/scrip'),
86 t = [('run some/scrip', '', 'some/scrip'),
87 ('run scripts/er', 'ror.py foo', 'scripts/er'),
87 ('run scripts/er', 'ror.py foo', 'scripts/er'),
88 ('echo $HOM', '', 'HOM'),
88 ('echo $HOM', '', 'HOM'),
89 ('print sys.pa', '', 'sys.pa'),
89 ('print sys.pa', '', 'sys.pa'),
90 ('print(sys.pa', '', 'sys.pa'),
90 ('print(sys.pa', '', 'sys.pa'),
91 ("execfile('scripts/er", '', 'scripts/er'),
91 ("execfile('scripts/er", '', 'scripts/er'),
92 ('a[x.', '', 'x.'),
92 ('a[x.', '', 'x.'),
93 ('a[x.', 'y', 'x.'),
93 ('a[x.', 'y', 'x.'),
94 ('cd "some_file/', '', 'some_file/'),
94 ('cd "some_file/', '', 'some_file/'),
95 ]
95 ]
96 check_line_split(sp, t)
96 check_line_split(sp, t)
97 # Ensure splitting works OK with unicode by re-running the tests with
97 # Ensure splitting works OK with unicode by re-running the tests with
98 # all inputs turned into unicode
98 # all inputs turned into unicode
99 check_line_split(sp, [ map(unicode_type, p) for p in t] )
99 check_line_split(sp, [ map(unicode_type, p) for p in t] )
100
100
101
101
102 def test_custom_completion_error():
102 def test_custom_completion_error():
103 """Test that errors from custom attribute completers are silenced."""
103 """Test that errors from custom attribute completers are silenced."""
104 ip = get_ipython()
104 ip = get_ipython()
105 class A(object): pass
105 class A(object): pass
106 ip.user_ns['a'] = A()
106 ip.user_ns['a'] = A()
107
107
108 @complete_object.when_type(A)
108 @complete_object.when_type(A)
109 def complete_A(a, existing_completions):
109 def complete_A(a, existing_completions):
110 raise TypeError("this should be silenced")
110 raise TypeError("this should be silenced")
111
111
112 ip.complete("a.")
112 ip.complete("a.")
113
113
114
114
115 def test_unicode_completions():
115 def test_unicode_completions():
116 ip = get_ipython()
116 ip = get_ipython()
117 # Some strings that trigger different types of completion. Check them both
117 # Some strings that trigger different types of completion. Check them both
118 # in str and unicode forms
118 # in str and unicode forms
119 s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
119 s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
120 for t in s + list(map(unicode_type, s)):
120 for t in s + list(map(unicode_type, s)):
121 # We don't need to check exact completion values (they may change
121 # We don't need to check exact completion values (they may change
122 # depending on the state of the namespace, but at least no exceptions
122 # depending on the state of the namespace, but at least no exceptions
123 # should be thrown and the return value should be a pair of text, list
123 # should be thrown and the return value should be a pair of text, list
124 # values.
124 # values.
125 text, matches = ip.complete(t)
125 text, matches = ip.complete(t)
126 nt.assert_true(isinstance(text, string_types))
126 nt.assert_true(isinstance(text, string_types))
127 nt.assert_true(isinstance(matches, list))
127 nt.assert_true(isinstance(matches, list))
128
128
129 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
129 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
130 def test_latex_completions():
130 def test_latex_completions():
131 from IPython.core.latex_symbols import latex_symbols
131 from IPython.core.latex_symbols import latex_symbols
132 import random
132 import random
133 ip = get_ipython()
133 ip = get_ipython()
134 # Test some random unicode symbols
134 # Test some random unicode symbols
135 keys = random.sample(latex_symbols.keys(), 10)
135 keys = random.sample(latex_symbols.keys(), 10)
136 for k in keys:
136 for k in keys:
137 text, matches = ip.complete(k)
137 text, matches = ip.complete(k)
138 nt.assert_equal(len(matches),1)
138 nt.assert_equal(len(matches),1)
139 nt.assert_equal(text, k)
139 nt.assert_equal(text, k)
140 nt.assert_equal(matches[0], latex_symbols[k])
140 nt.assert_equal(matches[0], latex_symbols[k])
141 # Test a more complex line
141 # Test a more complex line
142 text, matches = ip.complete(u'print(\\alpha')
142 text, matches = ip.complete(u'print(\\alpha')
143 nt.assert_equal(text, u'\\alpha')
143 nt.assert_equal(text, u'\\alpha')
144 nt.assert_equal(matches[0], latex_symbols['\\alpha'])
144 nt.assert_equal(matches[0], latex_symbols['\\alpha'])
145 # Test multiple matching latex symbols
145 # Test multiple matching latex symbols
146 text, matches = ip.complete(u'\\al')
146 text, matches = ip.complete(u'\\al')
147 nt.assert_in('\\alpha', matches)
147 nt.assert_in('\\alpha', matches)
148 nt.assert_in('\\aleph', matches)
148 nt.assert_in('\\aleph', matches)
149
149
150
150
151
151
152
152
153 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
153 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
154 def test_back_latex_completion():
154 def test_back_latex_completion():
155 ip = get_ipython()
155 ip = get_ipython()
156
156
157 # do not return more than 1 match for \beta, only the latex one.
157 # do not return more than 1 match for \beta, only the latex one.
158 name, matches = ip.complete('\\β')
158 name, matches = ip.complete('\\β')
159 nt.assert_equal(len(matches), 1)
159 nt.assert_equal(len(matches), 1)
160 nt.assert_equal(matches[0], '\\beta')
160 nt.assert_equal(matches[0], '\\beta')
161
161
162 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
162 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
163 def test_back_unicode_completion():
163 def test_back_unicode_completion():
164 ip = get_ipython()
164 ip = get_ipython()
165
165
166 name, matches = ip.complete('\\Ⅴ')
166 name, matches = ip.complete('\\Ⅴ')
167 nt.assert_equal(len(matches), 1)
167 nt.assert_equal(len(matches), 1)
168 nt.assert_equal(matches[0], '\\ROMAN NUMERAL FIVE')
168 nt.assert_equal(matches[0], '\\ROMAN NUMERAL FIVE')
169
169
170
170
171 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
171 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
172 def test_forward_unicode_completion():
172 def test_forward_unicode_completion():
173 ip = get_ipython()
173 ip = get_ipython()
174
174
175 name, matches = ip.complete('\\ROMAN NUMERAL FIVE')
175 name, matches = ip.complete('\\ROMAN NUMERAL FIVE')
176 nt.assert_equal(len(matches), 1)
176 nt.assert_equal(len(matches), 1)
177 nt.assert_equal(matches[0], 'Ⅴ')
177 nt.assert_equal(matches[0], 'Ⅴ')
178
178
179 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
179 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
180 def test_no_ascii_back_completion():
180 def test_no_ascii_back_completion():
181 ip = get_ipython()
181 ip = get_ipython()
182 with TemporaryWorkingDirectory(): # Avoid any filename completions
182 with TemporaryWorkingDirectory(): # Avoid any filename completions
183 # single ascii letters that don't have completions yet
183 # single ascii letters that don't have completions yet
184 for letter in 'fjqyJMQVWY' :
184 for letter in 'fjqyJMQVWY' :
185 name, matches = ip.complete('\\'+letter)
185 name, matches = ip.complete('\\'+letter)
186 nt.assert_equal(matches, [])
186 nt.assert_equal(matches, [])
187
187
188
188
189
189
190
190
191 class CompletionSplitterTestCase(unittest.TestCase):
191 class CompletionSplitterTestCase(unittest.TestCase):
192 def setUp(self):
192 def setUp(self):
193 self.sp = completer.CompletionSplitter()
193 self.sp = completer.CompletionSplitter()
194
194
195 def test_delim_setting(self):
195 def test_delim_setting(self):
196 self.sp.delims = ' '
196 self.sp.delims = ' '
197 nt.assert_equal(self.sp.delims, ' ')
197 nt.assert_equal(self.sp.delims, ' ')
198 nt.assert_equal(self.sp._delim_expr, '[\ ]')
198 nt.assert_equal(self.sp._delim_expr, '[\ ]')
199
199
200 def test_spaces(self):
200 def test_spaces(self):
201 """Test with only spaces as split chars."""
201 """Test with only spaces as split chars."""
202 self.sp.delims = ' '
202 self.sp.delims = ' '
203 t = [('foo', '', 'foo'),
203 t = [('foo', '', 'foo'),
204 ('run foo', '', 'foo'),
204 ('run foo', '', 'foo'),
205 ('run foo', 'bar', 'foo'),
205 ('run foo', 'bar', 'foo'),
206 ]
206 ]
207 check_line_split(self.sp, t)
207 check_line_split(self.sp, t)
208
208
209
209
210 def test_has_open_quotes1():
210 def test_has_open_quotes1():
211 for s in ["'", "'''", "'hi' '"]:
211 for s in ["'", "'''", "'hi' '"]:
212 nt.assert_equal(completer.has_open_quotes(s), "'")
212 nt.assert_equal(completer.has_open_quotes(s), "'")
213
213
214
214
215 def test_has_open_quotes2():
215 def test_has_open_quotes2():
216 for s in ['"', '"""', '"hi" "']:
216 for s in ['"', '"""', '"hi" "']:
217 nt.assert_equal(completer.has_open_quotes(s), '"')
217 nt.assert_equal(completer.has_open_quotes(s), '"')
218
218
219
219
220 def test_has_open_quotes3():
220 def test_has_open_quotes3():
221 for s in ["''", "''' '''", "'hi' 'ipython'"]:
221 for s in ["''", "''' '''", "'hi' 'ipython'"]:
222 nt.assert_false(completer.has_open_quotes(s))
222 nt.assert_false(completer.has_open_quotes(s))
223
223
224
224
225 def test_has_open_quotes4():
225 def test_has_open_quotes4():
226 for s in ['""', '""" """', '"hi" "ipython"']:
226 for s in ['""', '""" """', '"hi" "ipython"']:
227 nt.assert_false(completer.has_open_quotes(s))
227 nt.assert_false(completer.has_open_quotes(s))
228
228
229
229
230 @knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
230 @knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
231 def test_abspath_file_completions():
231 def test_abspath_file_completions():
232 ip = get_ipython()
232 ip = get_ipython()
233 with TemporaryDirectory() as tmpdir:
233 with TemporaryDirectory() as tmpdir:
234 prefix = os.path.join(tmpdir, 'foo')
234 prefix = os.path.join(tmpdir, 'foo')
235 suffixes = ['1', '2']
235 suffixes = ['1', '2']
236 names = [prefix+s for s in suffixes]
236 names = [prefix+s for s in suffixes]
237 for n in names:
237 for n in names:
238 open(n, 'w').close()
238 open(n, 'w').close()
239
239
240 # Check simple completion
240 # Check simple completion
241 c = ip.complete(prefix)[1]
241 c = ip.complete(prefix)[1]
242 nt.assert_equal(c, names)
242 nt.assert_equal(c, names)
243
243
244 # Now check with a function call
244 # Now check with a function call
245 cmd = 'a = f("%s' % prefix
245 cmd = 'a = f("%s' % prefix
246 c = ip.complete(prefix, cmd)[1]
246 c = ip.complete(prefix, cmd)[1]
247 comp = [prefix+s for s in suffixes]
247 comp = [prefix+s for s in suffixes]
248 nt.assert_equal(c, comp)
248 nt.assert_equal(c, comp)
249
249
250
250
251 def test_local_file_completions():
251 def test_local_file_completions():
252 ip = get_ipython()
252 ip = get_ipython()
253 with TemporaryWorkingDirectory():
253 with TemporaryWorkingDirectory():
254 prefix = './foo'
254 prefix = './foo'
255 suffixes = ['1', '2']
255 suffixes = ['1', '2']
256 names = [prefix+s for s in suffixes]
256 names = [prefix+s for s in suffixes]
257 for n in names:
257 for n in names:
258 open(n, 'w').close()
258 open(n, 'w').close()
259
259
260 # Check simple completion
260 # Check simple completion
261 c = ip.complete(prefix)[1]
261 c = ip.complete(prefix)[1]
262 nt.assert_equal(c, names)
262 nt.assert_equal(c, names)
263
263
264 # Now check with a function call
264 # Now check with a function call
265 cmd = 'a = f("%s' % prefix
265 cmd = 'a = f("%s' % prefix
266 c = ip.complete(prefix, cmd)[1]
266 c = ip.complete(prefix, cmd)[1]
267 comp = [prefix+s for s in suffixes]
267 comp = [prefix+s for s in suffixes]
268 nt.assert_equal(c, comp)
268 nt.assert_equal(c, comp)
269
269
270
270
271 def test_greedy_completions():
271 def test_greedy_completions():
272 ip = get_ipython()
272 ip = get_ipython()
273 ip.ex('a=list(range(5))')
273 ip.ex('a=list(range(5))')
274 _,c = ip.complete('.',line='a[0].')
274 _,c = ip.complete('.',line='a[0].')
275 nt.assert_false('a[0].real' in c,
275 nt.assert_false('a[0].real' in c,
276 "Shouldn't have completed on a[0]: %s"%c)
276 "Shouldn't have completed on a[0]: %s"%c)
277 with greedy_completion():
277 with greedy_completion():
278 _,c = ip.complete('.',line='a[0].')
278 _,c = ip.complete('.',line='a[0].')
279 nt.assert_true('a[0].real' in c, "Should have completed on a[0]: %s"%c)
279 nt.assert_true('a[0].real' in c, "Should have completed on a[0]: %s"%c)
280
280
281
281
282 def test_omit__names():
282 def test_omit__names():
283 # also happens to test IPCompleter as a configurable
283 # also happens to test IPCompleter as a configurable
284 ip = get_ipython()
284 ip = get_ipython()
285 ip._hidden_attr = 1
285 ip._hidden_attr = 1
286 ip._x = {}
286 ip._x = {}
287 c = ip.Completer
287 c = ip.Completer
288 ip.ex('ip=get_ipython()')
288 ip.ex('ip=get_ipython()')
289 cfg = Config()
289 cfg = Config()
290 cfg.IPCompleter.omit__names = 0
290 cfg.IPCompleter.omit__names = 0
291 c.update_config(cfg)
291 c.update_config(cfg)
292 s,matches = c.complete('ip.')
292 s,matches = c.complete('ip.')
293 nt.assert_in('ip.__str__', matches)
293 nt.assert_in('ip.__str__', matches)
294 nt.assert_in('ip._hidden_attr', matches)
294 nt.assert_in('ip._hidden_attr', matches)
295 cfg = Config()
295 cfg = Config()
296 cfg.IPCompleter.omit__names = 1
296 cfg.IPCompleter.omit__names = 1
297 c.update_config(cfg)
297 c.update_config(cfg)
298 s,matches = c.complete('ip.')
298 s,matches = c.complete('ip.')
299 nt.assert_not_in('ip.__str__', matches)
299 nt.assert_not_in('ip.__str__', matches)
300 nt.assert_in('ip._hidden_attr', matches)
300 nt.assert_in('ip._hidden_attr', matches)
301 cfg = Config()
301 cfg = Config()
302 cfg.IPCompleter.omit__names = 2
302 cfg.IPCompleter.omit__names = 2
303 c.update_config(cfg)
303 c.update_config(cfg)
304 s,matches = c.complete('ip.')
304 s,matches = c.complete('ip.')
305 nt.assert_not_in('ip.__str__', matches)
305 nt.assert_not_in('ip.__str__', matches)
306 nt.assert_not_in('ip._hidden_attr', matches)
306 nt.assert_not_in('ip._hidden_attr', matches)
307 s,matches = c.complete('ip._x.')
307 s,matches = c.complete('ip._x.')
308 nt.assert_in('ip._x.keys', matches)
308 nt.assert_in('ip._x.keys', matches)
309 del ip._hidden_attr
309 del ip._hidden_attr
310
310
311
311
312 def test_limit_to__all__False_ok():
312 def test_limit_to__all__False_ok():
313 ip = get_ipython()
313 ip = get_ipython()
314 c = ip.Completer
314 c = ip.Completer
315 ip.ex('class D: x=24')
315 ip.ex('class D: x=24')
316 ip.ex('d=D()')
316 ip.ex('d=D()')
317 cfg = Config()
317 cfg = Config()
318 cfg.IPCompleter.limit_to__all__ = False
318 cfg.IPCompleter.limit_to__all__ = False
319 c.update_config(cfg)
319 c.update_config(cfg)
320 s, matches = c.complete('d.')
320 s, matches = c.complete('d.')
321 nt.assert_in('d.x', matches)
321 nt.assert_in('d.x', matches)
322
322
323
323
324 def test_limit_to__all__True_ok():
324 def test_limit_to__all__True_ok():
325 ip = get_ipython()
325 ip = get_ipython()
326 c = ip.Completer
326 c = ip.Completer
327 ip.ex('class D: x=24')
327 ip.ex('class D: x=24')
328 ip.ex('d=D()')
328 ip.ex('d=D()')
329 ip.ex("d.__all__=['z']")
329 ip.ex("d.__all__=['z']")
330 cfg = Config()
330 cfg = Config()
331 cfg.IPCompleter.limit_to__all__ = True
331 cfg.IPCompleter.limit_to__all__ = True
332 c.update_config(cfg)
332 c.update_config(cfg)
333 s, matches = c.complete('d.')
333 s, matches = c.complete('d.')
334 nt.assert_in('d.z', matches)
334 nt.assert_in('d.z', matches)
335 nt.assert_not_in('d.x', matches)
335 nt.assert_not_in('d.x', matches)
336
336
337
337
338 def test_get__all__entries_ok():
338 def test_get__all__entries_ok():
339 class A(object):
339 class A(object):
340 __all__ = ['x', 1]
340 __all__ = ['x', 1]
341 words = completer.get__all__entries(A())
341 words = completer.get__all__entries(A())
342 nt.assert_equal(words, ['x'])
342 nt.assert_equal(words, ['x'])
343
343
344
344
345 def test_get__all__entries_no__all__ok():
345 def test_get__all__entries_no__all__ok():
346 class A(object):
346 class A(object):
347 pass
347 pass
348 words = completer.get__all__entries(A())
348 words = completer.get__all__entries(A())
349 nt.assert_equal(words, [])
349 nt.assert_equal(words, [])
350
350
351
351
352 def test_func_kw_completions():
352 def test_func_kw_completions():
353 ip = get_ipython()
353 ip = get_ipython()
354 c = ip.Completer
354 c = ip.Completer
355 ip.ex('def myfunc(a=1,b=2): return a+b')
355 ip.ex('def myfunc(a=1,b=2): return a+b')
356 s, matches = c.complete(None, 'myfunc(1,b')
356 s, matches = c.complete(None, 'myfunc(1,b')
357 nt.assert_in('b=', matches)
357 nt.assert_in('b=', matches)
358 # Simulate completing with cursor right after b (pos==10):
358 # Simulate completing with cursor right after b (pos==10):
359 s, matches = c.complete(None, 'myfunc(1,b)', 10)
359 s, matches = c.complete(None, 'myfunc(1,b)', 10)
360 nt.assert_in('b=', matches)
360 nt.assert_in('b=', matches)
361 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
361 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
362 nt.assert_in('b=', matches)
362 nt.assert_in('b=', matches)
363 #builtin function
363 #builtin function
364 s, matches = c.complete(None, 'min(k, k')
364 s, matches = c.complete(None, 'min(k, k')
365 nt.assert_in('key=', matches)
365 nt.assert_in('key=', matches)
366
366
367
367
368 def test_default_arguments_from_docstring():
368 def test_default_arguments_from_docstring():
369 doc = min.__doc__
369 doc = min.__doc__
370 ip = get_ipython()
370 ip = get_ipython()
371 c = ip.Completer
371 c = ip.Completer
372 kwd = c._default_arguments_from_docstring(
372 kwd = c._default_arguments_from_docstring(
373 'min(iterable[, key=func]) -> value')
373 'min(iterable[, key=func]) -> value')
374 nt.assert_equal(kwd, ['key'])
374 nt.assert_equal(kwd, ['key'])
375 #with cython type etc
375 #with cython type etc
376 kwd = c._default_arguments_from_docstring(
376 kwd = c._default_arguments_from_docstring(
377 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
377 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
378 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
378 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
379 #white spaces
379 #white spaces
380 kwd = c._default_arguments_from_docstring(
380 kwd = c._default_arguments_from_docstring(
381 '\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
381 '\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
382 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
382 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
383
383
384 def test_line_magics():
384 def test_line_magics():
385 ip = get_ipython()
385 ip = get_ipython()
386 c = ip.Completer
386 c = ip.Completer
387 s, matches = c.complete(None, 'lsmag')
387 s, matches = c.complete(None, 'lsmag')
388 nt.assert_in('%lsmagic', matches)
388 nt.assert_in('%lsmagic', matches)
389 s, matches = c.complete(None, '%lsmag')
389 s, matches = c.complete(None, '%lsmag')
390 nt.assert_in('%lsmagic', matches)
390 nt.assert_in('%lsmagic', matches)
391
391
392
392
393 def test_cell_magics():
393 def test_cell_magics():
394 from IPython.core.magic import register_cell_magic
394 from IPython.core.magic import register_cell_magic
395
395
396 @register_cell_magic
396 @register_cell_magic
397 def _foo_cellm(line, cell):
397 def _foo_cellm(line, cell):
398 pass
398 pass
399
399
400 ip = get_ipython()
400 ip = get_ipython()
401 c = ip.Completer
401 c = ip.Completer
402
402
403 s, matches = c.complete(None, '_foo_ce')
403 s, matches = c.complete(None, '_foo_ce')
404 nt.assert_in('%%_foo_cellm', matches)
404 nt.assert_in('%%_foo_cellm', matches)
405 s, matches = c.complete(None, '%%_foo_ce')
405 s, matches = c.complete(None, '%%_foo_ce')
406 nt.assert_in('%%_foo_cellm', matches)
406 nt.assert_in('%%_foo_cellm', matches)
407
407
408
408
409 def test_line_cell_magics():
409 def test_line_cell_magics():
410 from IPython.core.magic import register_line_cell_magic
410 from IPython.core.magic import register_line_cell_magic
411
411
412 @register_line_cell_magic
412 @register_line_cell_magic
413 def _bar_cellm(line, cell):
413 def _bar_cellm(line, cell):
414 pass
414 pass
415
415
416 ip = get_ipython()
416 ip = get_ipython()
417 c = ip.Completer
417 c = ip.Completer
418
418
419 # The policy here is trickier, see comments in completion code. The
419 # The policy here is trickier, see comments in completion code. The
420 # returned values depend on whether the user passes %% or not explicitly,
420 # returned values depend on whether the user passes %% or not explicitly,
421 # and this will show a difference if the same name is both a line and cell
421 # and this will show a difference if the same name is both a line and cell
422 # magic.
422 # magic.
423 s, matches = c.complete(None, '_bar_ce')
423 s, matches = c.complete(None, '_bar_ce')
424 nt.assert_in('%_bar_cellm', matches)
424 nt.assert_in('%_bar_cellm', matches)
425 nt.assert_in('%%_bar_cellm', matches)
425 nt.assert_in('%%_bar_cellm', matches)
426 s, matches = c.complete(None, '%_bar_ce')
426 s, matches = c.complete(None, '%_bar_ce')
427 nt.assert_in('%_bar_cellm', matches)
427 nt.assert_in('%_bar_cellm', matches)
428 nt.assert_in('%%_bar_cellm', matches)
428 nt.assert_in('%%_bar_cellm', matches)
429 s, matches = c.complete(None, '%%_bar_ce')
429 s, matches = c.complete(None, '%%_bar_ce')
430 nt.assert_not_in('%_bar_cellm', matches)
430 nt.assert_not_in('%_bar_cellm', matches)
431 nt.assert_in('%%_bar_cellm', matches)
431 nt.assert_in('%%_bar_cellm', matches)
432
432
433
433
434 def test_magic_completion_order():
434 def test_magic_completion_order():
435
435
436 ip = get_ipython()
436 ip = get_ipython()
437 c = ip.Completer
437 c = ip.Completer
438
438
439 # Test ordering of magics and non-magics with the same name
439 # Test ordering of magics and non-magics with the same name
440 # We want the non-magic first
440 # We want the non-magic first
441
441
442 # Before importing matplotlib, there should only be one option:
442 # Before importing matplotlib, there should only be one option:
443
443
444 text, matches = c.complete('mat')
444 text, matches = c.complete('mat')
445 nt.assert_equal(matches, ["%matplotlib"])
445 nt.assert_equal(matches, ["%matplotlib"])
446
446
447
447
448 ip.run_cell("matplotlib = 1") # introduce name into namespace
448 ip.run_cell("matplotlib = 1") # introduce name into namespace
449
449
450 # After the import, there should be two options, ordered like this:
450 # After the import, there should be two options, ordered like this:
451 text, matches = c.complete('mat')
451 text, matches = c.complete('mat')
452 nt.assert_equal(matches, ["matplotlib", "%matplotlib"])
452 nt.assert_equal(matches, ["matplotlib", "%matplotlib"])
453
453
454
454
455 ip.run_cell("timeit = 1") # define a user variable called 'timeit'
455 ip.run_cell("timeit = 1") # define a user variable called 'timeit'
456
456
457 # Order of user variable and line and cell magics with same name:
457 # Order of user variable and line and cell magics with same name:
458 text, matches = c.complete('timeit')
458 text, matches = c.complete('timeit')
459 nt.assert_equal(matches, ["timeit", "%timeit","%%timeit"])
459 nt.assert_equal(matches, ["timeit", "%timeit","%%timeit"])
460
460
461
461
462 def test_dict_key_completion_string():
462 def test_dict_key_completion_string():
463 """Test dictionary key completion for string keys"""
463 """Test dictionary key completion for string keys"""
464 ip = get_ipython()
464 ip = get_ipython()
465 complete = ip.Completer.complete
465 complete = ip.Completer.complete
466
466
467 ip.user_ns['d'] = {'abc': None}
467 ip.user_ns['d'] = {'abc': None}
468
468
469 # check completion at different stages
469 # check completion at different stages
470 _, matches = complete(line_buffer="d[")
470 _, matches = complete(line_buffer="d[")
471 nt.assert_in("'abc'", matches)
471 nt.assert_in("'abc'", matches)
472 nt.assert_not_in("'abc']", matches)
472 nt.assert_not_in("'abc']", matches)
473
473
474 _, matches = complete(line_buffer="d['")
474 _, matches = complete(line_buffer="d['")
475 nt.assert_in("abc", matches)
475 nt.assert_in("abc", matches)
476 nt.assert_not_in("abc']", matches)
476 nt.assert_not_in("abc']", matches)
477
477
478 _, matches = complete(line_buffer="d['a")
478 _, matches = complete(line_buffer="d['a")
479 nt.assert_in("abc", matches)
479 nt.assert_in("abc", matches)
480 nt.assert_not_in("abc']", matches)
480 nt.assert_not_in("abc']", matches)
481
481
482 # check use of different quoting
482 # check use of different quoting
483 _, matches = complete(line_buffer="d[\"")
483 _, matches = complete(line_buffer="d[\"")
484 nt.assert_in("abc", matches)
484 nt.assert_in("abc", matches)
485 nt.assert_not_in('abc\"]', matches)
485 nt.assert_not_in('abc\"]', matches)
486
486
487 _, matches = complete(line_buffer="d[\"a")
487 _, matches = complete(line_buffer="d[\"a")
488 nt.assert_in("abc", matches)
488 nt.assert_in("abc", matches)
489 nt.assert_not_in('abc\"]', matches)
489 nt.assert_not_in('abc\"]', matches)
490
490
491 # check sensitivity to following context
491 # check sensitivity to following context
492 _, matches = complete(line_buffer="d[]", cursor_pos=2)
492 _, matches = complete(line_buffer="d[]", cursor_pos=2)
493 nt.assert_in("'abc'", matches)
493 nt.assert_in("'abc'", matches)
494
494
495 _, matches = complete(line_buffer="d['']", cursor_pos=3)
495 _, matches = complete(line_buffer="d['']", cursor_pos=3)
496 nt.assert_in("abc", matches)
496 nt.assert_in("abc", matches)
497 nt.assert_not_in("abc'", matches)
497 nt.assert_not_in("abc'", matches)
498 nt.assert_not_in("abc']", matches)
498 nt.assert_not_in("abc']", matches)
499
499
500 # check multiple solutions are correctly returned and that noise is not
500 # check multiple solutions are correctly returned and that noise is not
501 ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,
501 ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,
502 5: None}
502 5: None}

    _, matches = complete(line_buffer="d['a")
    nt.assert_in("abc", matches)
    nt.assert_in("abd", matches)
    nt.assert_not_in("bad", matches)
    assert not any(m.endswith((']', '"', "'")) for m in matches), matches

    # check escaping and whitespace
    ip.user_ns['d'] = {'a\nb': None, 'a\'b': None, 'a"b': None, 'a word': None}
    _, matches = complete(line_buffer="d['a")
    nt.assert_in("a\\nb", matches)
    nt.assert_in("a\\'b", matches)
    nt.assert_in("a\"b", matches)
    nt.assert_in("a word", matches)
    assert not any(m.endswith((']', '"', "'")) for m in matches), matches

    # - can complete on non-initial word of the string
    _, matches = complete(line_buffer="d['a w")
    nt.assert_in("word", matches)

    # - understands quote escaping
    _, matches = complete(line_buffer="d['a\\'")
    nt.assert_in("b", matches)

    # - default quoting should work like repr
    _, matches = complete(line_buffer="d[")
    nt.assert_in("\"a'b\"", matches)

    # - when opening quote with ", possible to match with unescaped apostrophe
    _, matches = complete(line_buffer="d[\"a'")
    nt.assert_in("b", matches)

    # need to not split at delims that readline won't split at
    if '-' not in ip.Completer.splitter.delims:
        ip.user_ns['d'] = {'before-after': None}
        _, matches = complete(line_buffer="d['before-af")
        nt.assert_in('before-after', matches)

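# Taken together, the assertions above describe the interactive behaviour:
# for a dict ``d`` with string keys, typing ``d['a`` and pressing TAB offers
# only the keys matching the typed prefix, with no closing quote or bracket
# appended. A rough sketch of a session (exact rendering depends on the
# frontend) could look like:
#
#     In [1]: d = {'abc': None, 'abd': None, 'bad': None}
#     In [2]: d['a<TAB>
#     abc  abd
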
def test_dict_key_completion_contexts():
    """Test expression contexts in which dict key completion occurs"""
    ip = get_ipython()
    complete = ip.Completer.complete
    d = {'abc': None}
    ip.user_ns['d'] = d

    class C:
        data = d
    ip.user_ns['C'] = C
    ip.user_ns['get'] = lambda: d

    def assert_no_completion(**kwargs):
        _, matches = complete(**kwargs)
        nt.assert_not_in('abc', matches)
        nt.assert_not_in('abc\'', matches)
        nt.assert_not_in('abc\']', matches)
        nt.assert_not_in('\'abc\'', matches)
        nt.assert_not_in('\'abc\']', matches)

    def assert_completion(**kwargs):
        _, matches = complete(**kwargs)
        nt.assert_in("'abc'", matches)
        nt.assert_not_in("'abc']", matches)

    # no completion after string closed, even if reopened
    assert_no_completion(line_buffer="d['a'")
    assert_no_completion(line_buffer="d[\"a\"")
    assert_no_completion(line_buffer="d['a' + ")
    assert_no_completion(line_buffer="d['a' + '")

    # completion in non-trivial expressions
    assert_completion(line_buffer="+ d[")
    assert_completion(line_buffer="(d[")
    assert_completion(line_buffer="C.data[")

    # greedy flag
    def assert_completion(**kwargs):
        _, matches = complete(**kwargs)
        nt.assert_in("get()['abc']", matches)

    assert_no_completion(line_buffer="get()[")
    with greedy_completion():
        assert_completion(line_buffer="get()[")
        assert_completion(line_buffer="get()['")
        assert_completion(line_buffer="get()['a")
        assert_completion(line_buffer="get()['ab")
        assert_completion(line_buffer="get()['abc")

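# Note on the greedy flag: ``greedy_completion()`` is a helper defined earlier
# in this test module; presumably it temporarily enables the completer's
# ``greedy`` option for the duration of the block. Outside the test suite the
# equivalent behaviour is normally switched on through configuration, e.g.
# ``c.IPCompleter.greedy = True`` in a profile's config file, or
# ``%config IPCompleter.greedy=True`` at the prompt. With greedy completion
# enabled, expressions such as ``get()[`` are evaluated so that their keys can
# be completed, which is why the ``get()[...`` assertions above only pass
# inside the ``with greedy_completion():`` block.
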
@dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
def test_dict_key_completion_bytes():
    """Test handling of bytes in dict key completion"""
    ip = get_ipython()
    complete = ip.Completer.complete

    ip.user_ns['d'] = {'abc': None, b'abd': None}

    _, matches = complete(line_buffer="d[")
    nt.assert_in("'abc'", matches)
    nt.assert_in("b'abd'", matches)

    if False:  # not currently implemented
        _, matches = complete(line_buffer="d[b")
        nt.assert_in("b'abd'", matches)
        nt.assert_not_in("b'abc'", matches)

        _, matches = complete(line_buffer="d[b'")
        nt.assert_in("abd", matches)
        nt.assert_not_in("abc", matches)

        _, matches = complete(line_buffer="d[B'")
        nt.assert_in("abd", matches)
        nt.assert_not_in("abc", matches)

        _, matches = complete(line_buffer="d['")
        nt.assert_in("abc", matches)
        nt.assert_not_in("abd", matches)

@dec.onlyif(sys.version_info[0] < 3, 'This test only applies in Py<3')
def test_dict_key_completion_unicode_py2():
    """Test handling of unicode in dict key completion"""
    ip = get_ipython()
    complete = ip.Completer.complete

    ip.user_ns['d'] = {u'abc': None,
                       u'a\u05d0b': None}

    _, matches = complete(line_buffer="d[")
    nt.assert_in("u'abc'", matches)
    nt.assert_in("u'a\\u05d0b'", matches)

    _, matches = complete(line_buffer="d['a")
    nt.assert_in("abc", matches)
    nt.assert_not_in("a\\u05d0b", matches)

    _, matches = complete(line_buffer="d[u'a")
    nt.assert_in("abc", matches)
    nt.assert_in("a\\u05d0b", matches)

    _, matches = complete(line_buffer="d[U'a")
    nt.assert_in("abc", matches)
    nt.assert_in("a\\u05d0b", matches)

    # query using escape
    _, matches = complete(line_buffer=u"d[u'a\\u05d0")
    nt.assert_in("u05d0b", matches)  # tokenized after \\

    # query using character
    _, matches = complete(line_buffer=u"d[u'a\u05d0")
    nt.assert_in(u"a\u05d0b", matches)

    with greedy_completion():
        _, matches = complete(line_buffer="d[")
        nt.assert_in("d[u'abc']", matches)
        nt.assert_in("d[u'a\\u05d0b']", matches)

        _, matches = complete(line_buffer="d['a")
        nt.assert_in("d['abc']", matches)
        nt.assert_not_in("d[u'a\\u05d0b']", matches)

        _, matches = complete(line_buffer="d[u'a")
        nt.assert_in("d[u'abc']", matches)
        nt.assert_in("d[u'a\\u05d0b']", matches)

        _, matches = complete(line_buffer="d[U'a")
        nt.assert_in("d[U'abc']", matches)
        nt.assert_in("d[U'a\\u05d0b']", matches)

        # query using escape
        _, matches = complete(line_buffer=u"d[u'a\\u05d0")
        nt.assert_in("d[u'a\\u05d0b']", matches)  # tokenized after \\

        # query using character
        _, matches = complete(line_buffer=u"d[u'a\u05d0")
        nt.assert_in(u"d[u'a\u05d0b']", matches)

@dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
def test_dict_key_completion_unicode_py3():
    """Test handling of unicode in dict key completion"""
    ip = get_ipython()
    complete = ip.Completer.complete

    ip.user_ns['d'] = {u'a\u05d0': None}

    # query using escape
    _, matches = complete(line_buffer="d['a\\u05d0")
    nt.assert_in("u05d0", matches)  # tokenized after \\

    # query using character
    _, matches = complete(line_buffer="d['a\u05d0")
    nt.assert_in(u"a\u05d0", matches)

    with greedy_completion():
        # query using escape
        _, matches = complete(line_buffer="d['a\\u05d0")
        nt.assert_in("d['a\\u05d0']", matches)  # tokenized after \\

        # query using character
        _, matches = complete(line_buffer="d['a\u05d0")
        nt.assert_in(u"d['a\u05d0']", matches)


@dec.skip_without('numpy')
def test_struct_array_key_completion():
    """Test dict key completion applies to numpy struct arrays"""
    import numpy
    ip = get_ipython()
    complete = ip.Completer.complete
    ip.user_ns['d'] = numpy.array([], dtype=[('hello', 'f'), ('world', 'f')])
    _, matches = complete(line_buffer="d['")
    nt.assert_in("hello", matches)
    nt.assert_in("world", matches)
    # complete on the numpy struct itself
    dt = numpy.dtype([('my_head', [('my_dt', '>u4'), ('my_df', '>u4')]),
                      ('my_data', '>f4', 5)])
    x = numpy.zeros(2, dtype=dt)
    ip.user_ns['d'] = x[1]
    _, matches = complete(line_buffer="d['")
    nt.assert_in("my_head", matches)
    nt.assert_in("my_data", matches)
    # complete on a nested level
    with greedy_completion():
        ip.user_ns['d'] = numpy.zeros(2, dtype=dt)
        _, matches = complete(line_buffer="d[1]['my_head']['")
        nt.assert_true(any(["my_dt" in m for m in matches]))
        nt.assert_true(any(["my_df" in m for m in matches]))

@dec.skip_without('pandas')
def test_dataframe_key_completion():
    """Test dict key completion applies to pandas DataFrames"""
    import pandas
    ip = get_ipython()
    complete = ip.Completer.complete
    ip.user_ns['d'] = pandas.DataFrame({'hello': [1], 'world': [2]})
    _, matches = complete(line_buffer="d['")
    nt.assert_in("hello", matches)
    nt.assert_in("world", matches)

def test_dict_key_completion_invalids():
    """Smoke test for cases dict key completion can't handle"""
    ip = get_ipython()
    complete = ip.Completer.complete

    ip.user_ns['no_getitem'] = None
    ip.user_ns['no_keys'] = []
    ip.user_ns['cant_call_keys'] = dict
    ip.user_ns['empty'] = {}
    ip.user_ns['d'] = {'abc': 5}

    _, matches = complete(line_buffer="no_getitem['")
    _, matches = complete(line_buffer="no_keys['")
    _, matches = complete(line_buffer="cant_call_keys['")
    _, matches = complete(line_buffer="empty['")
    _, matches = complete(line_buffer="name_error['")
    _, matches = complete(line_buffer="d['\\")  # incomplete escape

class KeyCompletable(object):
    def __init__(self, things=()):
        self.things = things

    def _ipy_key_completions_(self):
        return list(self.things)

def test_object_key_completion():
    ip = get_ipython()
    ip.user_ns['key_completable'] = KeyCompletable(['qwerty', 'qwick'])

    _, matches = ip.Completer.complete(line_buffer="key_completable['qw")
    nt.assert_in('qwerty', matches)
    nt.assert_in('qwick', matches)

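# A minimal sketch of how user code can hook into the same mechanism that
# KeyCompletable/test_object_key_completion exercise above. The class and
# attribute names below are hypothetical; the only requirement the test
# relies on is an ``_ipy_key_completions_`` method returning an iterable of
# key strings.
class TagStoreSketch(object):
    """Hypothetical mapping-like object advertising its own key completions."""

    def __init__(self):
        self._data = {'qwerty': 1, 'qwick': 2}

    def __getitem__(self, key):
        return self._data[key]

    def _ipy_key_completions_(self):
        # Offered when the user types e.g. ``store['qw`` and presses TAB.
        return list(self._data)
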
def test_aimport_module_completer():
    ip = get_ipython()
    _, matches = ip.complete('i', '%aimport i')
    nt.assert_in('io', matches)
    nt.assert_not_in('int', matches)

def test_import_module_completer():
    ip = get_ipython()
    _, matches = ip.complete('i', 'import i')
    nt.assert_in('io', matches)
    nt.assert_not_in('int', matches)

def test_from_module_completer():
    ip = get_ipython()
    _, matches = ip.complete('B', 'from io import B')
    nt.assert_in('BytesIO', matches)
    nt.assert_not_in('BaseException', matches)