makes dictionary key completion use same delimiters as splitter
Jeff Hussmann
@@ -1,1266 +1,1267 @@
1 # encoding: utf-8
1 # encoding: utf-8
2 """Word completion for IPython.
2 """Word completion for IPython.
3
3
4 This module is a fork of the rlcompleter module in the Python standard
4 This module is a fork of the rlcompleter module in the Python standard
5 library. The original enhancements made to rlcompleter have been sent
5 library. The original enhancements made to rlcompleter have been sent
6 upstream and were accepted as of Python 2.3, but we need a lot more
6 upstream and were accepted as of Python 2.3, but we need a lot more
7 functionality specific to IPython, so this module will continue to live as an
7 functionality specific to IPython, so this module will continue to live as an
8 IPython-specific utility.
8 IPython-specific utility.
9
9
10 Original rlcompleter documentation:
10 Original rlcompleter documentation:
11
11
12 This requires the latest extension to the readline module. The
12 This requires the latest extension to the readline module. The
13 completer completes keywords, built-ins and globals in __main__; when completing
13 completer completes keywords, built-ins and globals in __main__; when completing
14 NAME.NAME..., it evaluates (!) the expression up to the last dot and
14 NAME.NAME..., it evaluates (!) the expression up to the last dot and
15 completes its attributes.
15 completes its attributes.
16
16
17 It's very cool to do "import string" type "string.", hit the
17 It's very cool to do "import string" type "string.", hit the
18 completion key (twice), and see the list of names defined by the
18 completion key (twice), and see the list of names defined by the
19 string module!
19 string module!
20
20
21 Tip: to use the tab key as the completion key, call
21 Tip: to use the tab key as the completion key, call
22
22
23 readline.parse_and_bind("tab: complete")
23 readline.parse_and_bind("tab: complete")
24
24
25 Notes:
25 Notes:
26
26
27 - Exceptions raised by the completer function are *ignored* (and
27 - Exceptions raised by the completer function are *ignored* (and
28 generally cause the completion to fail). This is a feature -- since
28 generally cause the completion to fail). This is a feature -- since
29 readline sets the tty device in raw (or cbreak) mode, printing a
29 readline sets the tty device in raw (or cbreak) mode, printing a
30 traceback wouldn't work well without some complicated hoopla to save,
30 traceback wouldn't work well without some complicated hoopla to save,
31 reset and restore the tty state.
31 reset and restore the tty state.
32
32
33 - The evaluation of the NAME.NAME... form may cause arbitrary
33 - The evaluation of the NAME.NAME... form may cause arbitrary
34 application defined code to be executed if an object with a
34 application defined code to be executed if an object with a
35 ``__getattr__`` hook is found. Since it is the responsibility of the
35 ``__getattr__`` hook is found. Since it is the responsibility of the
36 application (or the user) to enable this feature, I consider this an
36 application (or the user) to enable this feature, I consider this an
37 acceptable risk. More complicated expressions (e.g. function calls or
37 acceptable risk. More complicated expressions (e.g. function calls or
38 indexing operations) are *not* evaluated.
38 indexing operations) are *not* evaluated.
39
39
40 - GNU readline is also used by the built-in functions input() and
40 - GNU readline is also used by the built-in functions input() and
41 raw_input(), and thus these also benefit/suffer from the completer
41 raw_input(), and thus these also benefit/suffer from the completer
42 features. Clearly an interactive application can benefit by
42 features. Clearly an interactive application can benefit by
43 specifying its own completer function and using raw_input() for all
43 specifying its own completer function and using raw_input() for all
44 its input.
44 its input.
45
45
46 - When the original stdin is not a tty device, GNU readline is never
46 - When the original stdin is not a tty device, GNU readline is never
47 used, and this module (and the readline module) are silently inactive.
47 used, and this module (and the readline module) are silently inactive.
48 """
48 """
49
49
50 # Copyright (c) IPython Development Team.
50 # Copyright (c) IPython Development Team.
51 # Distributed under the terms of the Modified BSD License.
51 # Distributed under the terms of the Modified BSD License.
52 #
52 #
53 # Some of this code originated from rlcompleter in the Python standard library
53 # Some of this code originated from rlcompleter in the Python standard library
54 # Copyright (C) 2001 Python Software Foundation, www.python.org
54 # Copyright (C) 2001 Python Software Foundation, www.python.org
55
55
56 import __main__
56 import __main__
57 import glob
57 import glob
58 import inspect
58 import inspect
59 import itertools
59 import itertools
60 import keyword
60 import keyword
61 import os
61 import os
62 import re
62 import re
63 import sys
63 import sys
64 import unicodedata
64 import unicodedata
65 import string
65 import string
66
66
67 from traitlets.config.configurable import Configurable
67 from traitlets.config.configurable import Configurable
68 from IPython.core.error import TryNext
68 from IPython.core.error import TryNext
69 from IPython.core.inputsplitter import ESC_MAGIC
69 from IPython.core.inputsplitter import ESC_MAGIC
70 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
70 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
71 from IPython.utils import generics
71 from IPython.utils import generics
72 from IPython.utils import io
72 from IPython.utils import io
73 from IPython.utils.decorators import undoc
73 from IPython.utils.decorators import undoc
74 from IPython.utils.dir2 import dir2
74 from IPython.utils.dir2 import dir2
75 from IPython.utils.process import arg_split
75 from IPython.utils.process import arg_split
76 from IPython.utils.py3compat import builtin_mod, string_types, PY3
76 from IPython.utils.py3compat import builtin_mod, string_types, PY3
77 from traitlets import CBool, Enum
77 from traitlets import CBool, Enum
78
78
79 #-----------------------------------------------------------------------------
79 #-----------------------------------------------------------------------------
80 # Globals
80 # Globals
81 #-----------------------------------------------------------------------------
81 #-----------------------------------------------------------------------------
82
82
83 # Public API
83 # Public API
84 __all__ = ['Completer','IPCompleter']
84 __all__ = ['Completer','IPCompleter']
85
85
86 if sys.platform == 'win32':
86 if sys.platform == 'win32':
87 PROTECTABLES = ' '
87 PROTECTABLES = ' '
88 else:
88 else:
89 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
89 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
90
90
91
91
92 #-----------------------------------------------------------------------------
92 #-----------------------------------------------------------------------------
93 # Main functions and classes
93 # Main functions and classes
94 #-----------------------------------------------------------------------------
94 #-----------------------------------------------------------------------------
95
95
96 def has_open_quotes(s):
96 def has_open_quotes(s):
97 """Return whether a string has open quotes.
97 """Return whether a string has open quotes.
98
98
99 This simply checks whether the number of quote characters of either type in
99 This simply checks whether the number of quote characters of either type in
100 the string is odd.
100 the string is odd.
101
101
102 Returns
102 Returns
103 -------
103 -------
104 If there is an open quote, the quote character is returned. Else, return
104 If there is an open quote, the quote character is returned. Else, return
105 False.
105 False.
106 """
106 """
107 # We check " first, then ', so complex cases with nested quotes will get
107 # We check " first, then ', so complex cases with nested quotes will get
108 # the " to take precedence.
108 # the " to take precedence.
109 if s.count('"') % 2:
109 if s.count('"') % 2:
110 return '"'
110 return '"'
111 elif s.count("'") % 2:
111 elif s.count("'") % 2:
112 return "'"
112 return "'"
113 else:
113 else:
114 return False
114 return False
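A quick illustration of the return values (a sketch, assuming this module is importable as IPython.core.completer, which is where this file lives):

    from IPython.core.completer import has_open_quotes

    print(has_open_quotes('print("hello'))    # -> '"'   (odd number of double quotes)
    print(has_open_quotes("it's"))            # -> "'"   (odd number of single quotes)
    print(has_open_quotes('say("hi") done'))  # -> False (all quotes balanced)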
115
115
116
116
117 def protect_filename(s):
117 def protect_filename(s):
118 """Escape a string to protect certain characters."""
118 """Escape a string to protect certain characters."""
119
119
120 return "".join([(ch in PROTECTABLES and '\\' + ch or ch)
120 return "".join([(ch in PROTECTABLES and '\\' + ch or ch)
121 for ch in s])
121 for ch in s])
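For example (note that PROTECTABLES, and therefore the escaping, differs between win32 and other platforms, as set above):

    from IPython.core.completer import protect_filename

    # A space is escaped with a backslash so readline does not treat it
    # as a word boundary.
    print(protect_filename('my file.txt'))    # my\ file.txt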
122
122
123 def expand_user(path):
123 def expand_user(path):
124 """Expand '~'-style usernames in strings.
124 """Expand '~'-style usernames in strings.
125
125
126 This is similar to :func:`os.path.expanduser`, but it computes and returns
126 This is similar to :func:`os.path.expanduser`, but it computes and returns
127 extra information that will be useful if the input was being used in
127 extra information that will be useful if the input was being used in
128 computing completions, and you wish to return the completions with the
128 computing completions, and you wish to return the completions with the
129 original '~' instead of its expanded value.
129 original '~' instead of its expanded value.
130
130
131 Parameters
131 Parameters
132 ----------
132 ----------
133 path : str
133 path : str
134 String to be expanded. If no ~ is present, the output is the same as the
134 String to be expanded. If no ~ is present, the output is the same as the
135 input.
135 input.
136
136
137 Returns
137 Returns
138 -------
138 -------
139 newpath : str
139 newpath : str
140 Result of ~ expansion in the input path.
140 Result of ~ expansion in the input path.
141 tilde_expand : bool
141 tilde_expand : bool
142 Whether any expansion was performed or not.
142 Whether any expansion was performed or not.
143 tilde_val : str
143 tilde_val : str
144 The value that ~ was replaced with.
144 The value that ~ was replaced with.
145 """
145 """
146 # Default values
146 # Default values
147 tilde_expand = False
147 tilde_expand = False
148 tilde_val = ''
148 tilde_val = ''
149 newpath = path
149 newpath = path
150
150
151 if path.startswith('~'):
151 if path.startswith('~'):
152 tilde_expand = True
152 tilde_expand = True
153 rest = len(path)-1
153 rest = len(path)-1
154 newpath = os.path.expanduser(path)
154 newpath = os.path.expanduser(path)
155 if rest:
155 if rest:
156 tilde_val = newpath[:-rest]
156 tilde_val = newpath[:-rest]
157 else:
157 else:
158 tilde_val = newpath
158 tilde_val = newpath
159
159
160 return newpath, tilde_expand, tilde_val
160 return newpath, tilde_expand, tilde_val
161
161
162
162
163 def compress_user(path, tilde_expand, tilde_val):
163 def compress_user(path, tilde_expand, tilde_val):
164 """Does the opposite of expand_user, with its outputs.
164 """Does the opposite of expand_user, with its outputs.
165 """
165 """
166 if tilde_expand:
166 if tilde_expand:
167 return path.replace(tilde_val, '~')
167 return path.replace(tilde_val, '~')
168 else:
168 else:
169 return path
169 return path
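The two helpers are meant to round-trip, as in this sketch (the exact output depends on the local home directory):

    from IPython.core.completer import expand_user, compress_user

    newpath, tilde_expand, tilde_val = expand_user('~/notebooks')
    print(newpath)       # e.g. /home/someuser/notebooks
    print(tilde_expand)  # True
    print(tilde_val)     # e.g. /home/someuser

    # compress_user undoes the expansion using the recorded tilde value.
    print(compress_user(newpath, tilde_expand, tilde_val))   # ~/notebooks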
170
170
171
171
172
172
173 def penalize_magics_key(word):
173 def penalize_magics_key(word):
174 """key for sorting that penalizes magic commands in the ordering
174 """key for sorting that penalizes magic commands in the ordering
175
175
176 Normal words are left alone.
176 Normal words are left alone.
177
177
178 Magic commands have the initial % moved to the end, e.g.
178 Magic commands have the initial % moved to the end, e.g.
179 %matplotlib is transformed as follows:
179 %matplotlib is transformed as follows:
180
180
181 %matplotlib -> matplotlib%
181 %matplotlib -> matplotlib%
182
182
183 [The choice of the final % is arbitrary.]
183 [The choice of the final % is arbitrary.]
184
184
185 Since "matplotlib" < "matplotlib%" as strings,
185 Since "matplotlib" < "matplotlib%" as strings,
186 "timeit" will appear before the magic "%timeit" in the ordering
186 "timeit" will appear before the magic "%timeit" in the ordering
187
187
188 For consistency, move "%%" to the end, so cell magics appear *after*
188 For consistency, move "%%" to the end, so cell magics appear *after*
189 line magics with the same name.
189 line magics with the same name.
190
190
191 A check is performed that there are no other "%" in the string;
191 A check is performed that there are no other "%" in the string;
192 if there are, then the string is not a magic command and is left unchanged.
192 if there are, then the string is not a magic command and is left unchanged.
193
193
194 """
194 """
195
195
196 # Move any % signs from start to end of the key
196 # Move any % signs from start to end of the key
197 # provided there are no others elsewhere in the string
197 # provided there are no others elsewhere in the string
198
198
199 if word[:2] == "%%":
199 if word[:2] == "%%":
200 if not "%" in word[2:]:
200 if not "%" in word[2:]:
201 return word[2:] + "%%"
201 return word[2:] + "%%"
202
202
203 if word[:1] == "%":
203 if word[:1] == "%":
204 if not "%" in word[1:]:
204 if not "%" in word[1:]:
205 return word[1:] + "%"
205 return word[1:] + "%"
206
206
207 return word
207 return word
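Used as a sort key, this produces the ordering the docstring describes; a small sketch:

    from IPython.core.completer import penalize_magics_key

    words = ['%%timeit', '%timeit', 'timeit', 'time']
    print(sorted(words, key=penalize_magics_key))
    # ['time', 'timeit', '%timeit', '%%timeit']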
208
208
209
209
210 @undoc
210 @undoc
211 class Bunch(object): pass
211 class Bunch(object): pass
212
212
213
213
214 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
214 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
215 GREEDY_DELIMS = ' =\r\n'
215 GREEDY_DELIMS = ' =\r\n'
216
216
217
217
218 class CompletionSplitter(object):
218 class CompletionSplitter(object):
219 """An object to split an input line in a manner similar to readline.
219 """An object to split an input line in a manner similar to readline.
220
220
221 By having our own implementation, we can expose readline-like completion in
221 By having our own implementation, we can expose readline-like completion in
222 a uniform manner to all frontends. This object only needs to be given the
222 a uniform manner to all frontends. This object only needs to be given the
223 line of text to be split and the cursor position on said line, and it
223 line of text to be split and the cursor position on said line, and it
224 returns the 'word' to be completed on at the cursor after splitting the
224 returns the 'word' to be completed on at the cursor after splitting the
225 entire line.
225 entire line.
226
226
227 What characters are used as splitting delimiters can be controlled by
227 What characters are used as splitting delimiters can be controlled by
228 setting the `delims` attribute (this is a property that internally
228 setting the `delims` attribute (this is a property that internally
229 automatically builds the necessary regular expression)"""
229 automatically builds the necessary regular expression)"""
230
230
231 # Private interface
231 # Private interface
232
232
233 # A string of delimiter characters. The default value makes sense for
233 # A string of delimiter characters. The default value makes sense for
234 # IPython's most typical usage patterns.
234 # IPython's most typical usage patterns.
235 _delims = DELIMS
235 _delims = DELIMS
236
236
237 # The expression (a normal string) to be compiled into a regular expression
237 # The expression (a normal string) to be compiled into a regular expression
238 # for actual splitting. We store it as an attribute mostly for ease of
238 # for actual splitting. We store it as an attribute mostly for ease of
239 # debugging, since this type of code can be so tricky to debug.
239 # debugging, since this type of code can be so tricky to debug.
240 _delim_expr = None
240 _delim_expr = None
241
241
242 # The regular expression that does the actual splitting
242 # The regular expression that does the actual splitting
243 _delim_re = None
243 _delim_re = None
244
244
245 def __init__(self, delims=None):
245 def __init__(self, delims=None):
246 delims = CompletionSplitter._delims if delims is None else delims
246 delims = CompletionSplitter._delims if delims is None else delims
247 self.delims = delims
247 self.delims = delims
248
248
249 @property
249 @property
250 def delims(self):
250 def delims(self):
251 """Return the string of delimiter characters."""
251 """Return the string of delimiter characters."""
252 return self._delims
252 return self._delims
253
253
254 @delims.setter
254 @delims.setter
255 def delims(self, delims):
255 def delims(self, delims):
256 """Set the delimiters for line splitting."""
256 """Set the delimiters for line splitting."""
257 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
257 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
258 self._delim_re = re.compile(expr)
258 self._delim_re = re.compile(expr)
259 self._delims = delims
259 self._delims = delims
260 self._delim_expr = expr
260 self._delim_expr = expr
261
261
262 def split_line(self, line, cursor_pos=None):
262 def split_line(self, line, cursor_pos=None):
263 """Split a line of text with a cursor at the given position.
263 """Split a line of text with a cursor at the given position.
264 """
264 """
265 l = line if cursor_pos is None else line[:cursor_pos]
265 l = line if cursor_pos is None else line[:cursor_pos]
266 return self._delim_re.split(l)[-1]
266 return self._delim_re.split(l)[-1]
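A sketch of how the splitter isolates the word to complete, with the default delimiters and with the greedy ones defined above (note that '.' is in neither set, while '[' and quotes are delimiters only in the default set):

    from IPython.core.completer import CompletionSplitter, GREEDY_DELIMS

    s = CompletionSplitter()             # uses DELIMS by default
    print(s.split_line('print a.b'))     # -> 'a.b'    ('.' is not a delimiter)
    print(s.split_line('d["ab'))         # -> 'ab'     ('[' and '"' are delimiters)

    s.delims = GREEDY_DELIMS             # only ' ', '=', '\r', '\n'
    print(s.split_line('d["ab'))         # -> 'd["ab'  (the whole expression is kept)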
267
267
268
268
269 class Completer(Configurable):
269 class Completer(Configurable):
270
270
271 greedy = CBool(False, config=True,
271 greedy = CBool(False, config=True,
272 help="""Activate greedy completion
272 help="""Activate greedy completion
273
273
274 This will enable completion on elements of lists, results of function calls, etc.,
274 This will enable completion on elements of lists, results of function calls, etc.,
275 but can be unsafe because the code is actually evaluated on TAB.
275 but can be unsafe because the code is actually evaluated on TAB.
276 """
276 """
277 )
277 )
278
278
279
279
280 def __init__(self, namespace=None, global_namespace=None, **kwargs):
280 def __init__(self, namespace=None, global_namespace=None, **kwargs):
281 """Create a new completer for the command line.
281 """Create a new completer for the command line.
282
282
283 Completer(namespace=ns,global_namespace=ns2) -> completer instance.
283 Completer(namespace=ns,global_namespace=ns2) -> completer instance.
284
284
285 If unspecified, the default namespace where completions are performed
285 If unspecified, the default namespace where completions are performed
286 is __main__ (technically, __main__.__dict__). Namespaces should be
286 is __main__ (technically, __main__.__dict__). Namespaces should be
287 given as dictionaries.
287 given as dictionaries.
288
288
289 An optional second namespace can be given. This allows the completer
289 An optional second namespace can be given. This allows the completer
290 to handle cases where both the local and global scopes need to be
290 to handle cases where both the local and global scopes need to be
291 distinguished.
291 distinguished.
292
292
293 Completer instances should be used as the completion mechanism of
293 Completer instances should be used as the completion mechanism of
294 readline via the set_completer() call:
294 readline via the set_completer() call:
295
295
296 readline.set_completer(Completer(my_namespace).complete)
296 readline.set_completer(Completer(my_namespace).complete)
297 """
297 """
298
298
299 # Don't bind to namespace quite yet, but flag whether the user wants a
299 # Don't bind to namespace quite yet, but flag whether the user wants a
300 # specific namespace or to use __main__.__dict__. This will allow us
300 # specific namespace or to use __main__.__dict__. This will allow us
301 # to bind to __main__.__dict__ at completion time, not now.
301 # to bind to __main__.__dict__ at completion time, not now.
302 if namespace is None:
302 if namespace is None:
303 self.use_main_ns = 1
303 self.use_main_ns = 1
304 else:
304 else:
305 self.use_main_ns = 0
305 self.use_main_ns = 0
306 self.namespace = namespace
306 self.namespace = namespace
307
307
308 # The global namespace, if given, can be bound directly
308 # The global namespace, if given, can be bound directly
309 if global_namespace is None:
309 if global_namespace is None:
310 self.global_namespace = {}
310 self.global_namespace = {}
311 else:
311 else:
312 self.global_namespace = global_namespace
312 self.global_namespace = global_namespace
313
313
314 super(Completer, self).__init__(**kwargs)
314 super(Completer, self).__init__(**kwargs)
315
315
316 def complete(self, text, state):
316 def complete(self, text, state):
317 """Return the next possible completion for 'text'.
317 """Return the next possible completion for 'text'.
318
318
319 This is called successively with state == 0, 1, 2, ... until it
319 This is called successively with state == 0, 1, 2, ... until it
320 returns None. The completion should begin with 'text'.
320 returns None. The completion should begin with 'text'.
321
321
322 """
322 """
323 if self.use_main_ns:
323 if self.use_main_ns:
324 self.namespace = __main__.__dict__
324 self.namespace = __main__.__dict__
325
325
326 if state == 0:
326 if state == 0:
327 if "." in text:
327 if "." in text:
328 self.matches = self.attr_matches(text)
328 self.matches = self.attr_matches(text)
329 else:
329 else:
330 self.matches = self.global_matches(text)
330 self.matches = self.global_matches(text)
331 try:
331 try:
332 return self.matches[state]
332 return self.matches[state]
333 except IndexError:
333 except IndexError:
334 return None
334 return None
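A sketch of driving this readline-style state protocol by hand, with an explicit namespace and no readline involved:

    from IPython.core.completer import Completer

    ns = {'alpha': 1, 'alphabet': 2}
    completer = Completer(namespace=ns)

    state = 0
    while True:
        match = completer.complete('alph', state)
        if match is None:
            break
        print(match)    # prints 'alpha' and 'alphabet' (order may vary)
        state += 1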
335
335
336 def global_matches(self, text):
336 def global_matches(self, text):
337 """Compute matches when text is a simple name.
337 """Compute matches when text is a simple name.
338
338
339 Return a list of all keywords, built-in functions and names currently
339 Return a list of all keywords, built-in functions and names currently
340 defined in self.namespace or self.global_namespace that match.
340 defined in self.namespace or self.global_namespace that match.
341
341
342 """
342 """
343 #print 'Completer->global_matches, txt=%r' % text # dbg
343 #print 'Completer->global_matches, txt=%r' % text # dbg
344 matches = []
344 matches = []
345 match_append = matches.append
345 match_append = matches.append
346 n = len(text)
346 n = len(text)
347 for lst in [keyword.kwlist,
347 for lst in [keyword.kwlist,
348 builtin_mod.__dict__.keys(),
348 builtin_mod.__dict__.keys(),
349 self.namespace.keys(),
349 self.namespace.keys(),
350 self.global_namespace.keys()]:
350 self.global_namespace.keys()]:
351 for word in lst:
351 for word in lst:
352 if word[:n] == text and word != "__builtins__":
352 if word[:n] == text and word != "__builtins__":
353 match_append(word)
353 match_append(word)
354 return matches
354 return matches
355
355
356 def attr_matches(self, text):
356 def attr_matches(self, text):
357 """Compute matches when text contains a dot.
357 """Compute matches when text contains a dot.
358
358
359 Assuming the text is of the form NAME.NAME....[NAME], and is
359 Assuming the text is of the form NAME.NAME....[NAME], and is
360 evaluatable in self.namespace or self.global_namespace, it will be
360 evaluatable in self.namespace or self.global_namespace, it will be
361 evaluated and its attributes (as revealed by dir()) are used as
361 evaluated and its attributes (as revealed by dir()) are used as
362 possible completions. (For class instances, class members are
362 possible completions. (For class instances, class members are
363 also considered.)
363 also considered.)
364
364
365 WARNING: this can still invoke arbitrary C code, if an object
365 WARNING: this can still invoke arbitrary C code, if an object
366 with a __getattr__ hook is evaluated.
366 with a __getattr__ hook is evaluated.
367
367
368 """
368 """
369
369
370 #io.rprint('Completer->attr_matches, txt=%r' % text) # dbg
370 #io.rprint('Completer->attr_matches, txt=%r' % text) # dbg
371 # Another option, seems to work great. Catches things like ''.<tab>
371 # Another option, seems to work great. Catches things like ''.<tab>
372 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
372 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
373
373
374 if m:
374 if m:
375 expr, attr = m.group(1, 3)
375 expr, attr = m.group(1, 3)
376 elif self.greedy:
376 elif self.greedy:
377 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
377 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
378 if not m2:
378 if not m2:
379 return []
379 return []
380 expr, attr = m2.group(1,2)
380 expr, attr = m2.group(1,2)
381 else:
381 else:
382 return []
382 return []
383
383
384 try:
384 try:
385 obj = eval(expr, self.namespace)
385 obj = eval(expr, self.namespace)
386 except:
386 except:
387 try:
387 try:
388 obj = eval(expr, self.global_namespace)
388 obj = eval(expr, self.global_namespace)
389 except:
389 except:
390 return []
390 return []
391
391
392 if self.limit_to__all__ and hasattr(obj, '__all__'):
392 if self.limit_to__all__ and hasattr(obj, '__all__'):
393 words = get__all__entries(obj)
393 words = get__all__entries(obj)
394 else:
394 else:
395 words = dir2(obj)
395 words = dir2(obj)
396
396
397 try:
397 try:
398 words = generics.complete_object(obj, words)
398 words = generics.complete_object(obj, words)
399 except TryNext:
399 except TryNext:
400 pass
400 pass
401 except Exception:
401 except Exception:
402 # Silence errors from completion function
402 # Silence errors from completion function
403 #raise # dbg
403 #raise # dbg
404 pass
404 pass
405 # Build match list to return
405 # Build match list to return
406 n = len(attr)
406 n = len(attr)
407 res = ["%s.%s" % (expr, w) for w in words if w[:n] == attr ]
407 res = ["%s.%s" % (expr, w) for w in words if w[:n] == attr ]
408 return res
408 return res
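The regular expression above decides which part of the text is evaluated and which part is used as the attribute prefix; a standalone sketch of that split (the dotted name is arbitrary, and nothing is evaluated here):

    import re

    m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", 'np.linalg.nor')
    expr, attr = m.group(1, 3)
    print(expr)   # -> 'np.linalg'  (this part would be eval'd to an object)
    print(attr)   # -> 'nor'        (attribute names are filtered by this prefix)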
409
409
410
410
411 def get__all__entries(obj):
411 def get__all__entries(obj):
412 """returns the strings in the __all__ attribute"""
412 """returns the strings in the __all__ attribute"""
413 try:
413 try:
414 words = getattr(obj, '__all__')
414 words = getattr(obj, '__all__')
415 except:
415 except:
416 return []
416 return []
417
417
418 return [w for w in words if isinstance(w, string_types)]
418 return [w for w in words if isinstance(w, string_types)]
419
419
420
420
421 def match_dict_keys(keys, prefix):
421 def match_dict_keys(keys, prefix, delims):
422 """Used by dict_key_matches, matching the prefix to a list of keys"""
422 """Used by dict_key_matches, matching the prefix to a list of keys"""
423 if not prefix:
423 if not prefix:
424 return None, 0, [repr(k) for k in keys
424 return None, 0, [repr(k) for k in keys
425 if isinstance(k, (string_types, bytes))]
425 if isinstance(k, (string_types, bytes))]
426 quote_match = re.search('["\']', prefix)
426 quote_match = re.search('["\']', prefix)
427 quote = quote_match.group()
427 quote = quote_match.group()
428 try:
428 try:
429 prefix_str = eval(prefix + quote, {})
429 prefix_str = eval(prefix + quote, {})
430 except Exception:
430 except Exception:
431 return None, 0, []
431 return None, 0, []
432
432
433 token_match = re.search(r'\w*$', prefix, re.UNICODE)
433 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
434 token_match = re.search(pattern, prefix, re.UNICODE)
434 token_start = token_match.start()
435 token_start = token_match.start()
435 token_prefix = token_match.group()
436 token_prefix = token_match.group()
436
437
437 # TODO: support bytes in Py3k
438 # TODO: support bytes in Py3k
438 matched = []
439 matched = []
439 for key in keys:
440 for key in keys:
440 try:
441 try:
441 if not key.startswith(prefix_str):
442 if not key.startswith(prefix_str):
442 continue
443 continue
443 except (AttributeError, TypeError, UnicodeError):
444 except (AttributeError, TypeError, UnicodeError):
444 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
445 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
445 continue
446 continue
446
447
447 # reformat remainder of key to begin with prefix
448 # reformat remainder of key to begin with prefix
448 rem = key[len(prefix_str):]
449 rem = key[len(prefix_str):]
449 # force repr wrapped in '
450 # force repr wrapped in '
450 rem_repr = repr(rem + '"')
451 rem_repr = repr(rem + '"')
451 if rem_repr.startswith('u') and prefix[0] not in 'uU':
452 if rem_repr.startswith('u') and prefix[0] not in 'uU':
452 # Found key is unicode, but prefix is Py2 string.
453 # Found key is unicode, but prefix is Py2 string.
453 # Therefore attempt to interpret key as string.
454 # Therefore attempt to interpret key as string.
454 try:
455 try:
455 rem_repr = repr(rem.encode('ascii') + '"')
456 rem_repr = repr(rem.encode('ascii') + '"')
456 except UnicodeEncodeError:
457 except UnicodeEncodeError:
457 continue
458 continue
458
459
459 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
460 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
460 if quote == '"':
461 if quote == '"':
461 # The entered prefix is quoted with ",
462 # The entered prefix is quoted with ",
462 # but the match is quoted with '.
463 # but the match is quoted with '.
463 # A contained " hence needs escaping for comparison:
464 # A contained " hence needs escaping for comparison:
464 rem_repr = rem_repr.replace('"', '\\"')
465 rem_repr = rem_repr.replace('"', '\\"')
465
466
466 # then reinsert prefix from start of token
467 # then reinsert prefix from start of token
467 matched.append('%s%s' % (token_prefix, rem_repr))
468 matched.append('%s%s' % (token_prefix, rem_repr))
468 return quote, token_start, matched
469 return quote, token_start, matched
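This is the function the commit changes: the token prefix is now the trailing run of non-delimiter characters rather than a \w* run, so the prefix is split the same way the CompletionSplitter already split the line. A sketch with keys containing '-', which the old pattern would have cut in the middle (assumes the post-change IPython.core.completer):

    from IPython.core.completer import match_dict_keys, DELIMS

    keys = ['foo-bar', 'foo-baz', 'other']
    quote, token_start, matched = match_dict_keys(keys, "'foo-b", DELIMS)
    print(quote)        # '   (the prefix was opened with a single quote)
    print(token_start)  # 1   (the token starts right after that quote)
    print(matched)      # ['foo-bar', 'foo-baz']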
469
470
470
471
471 def _safe_isinstance(obj, module, class_name):
472 def _safe_isinstance(obj, module, class_name):
472 """Checks if obj is an instance of module.class_name if loaded
473 """Checks if obj is an instance of module.class_name if loaded
473 """
474 """
474 return (module in sys.modules and
475 return (module in sys.modules and
475 isinstance(obj, getattr(__import__(module), class_name)))
476 isinstance(obj, getattr(__import__(module), class_name)))
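The point of this helper is that it never imports anything itself; it only consults modules already present in sys.modules. A sketch (this is a private helper, imported here purely for illustration):

    from IPython.core.completer import _safe_isinstance

    # If numpy has not been loaded, this is simply False -- no import is attempted.
    print(_safe_isinstance([1, 2, 3], 'numpy', 'ndarray'))

    import collections
    print(_safe_isinstance(collections.OrderedDict(), 'collections', 'OrderedDict'))  # True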
476
477
477
478
478
479
479 def back_unicode_name_matches(text):
480 def back_unicode_name_matches(text):
480 u"""Match unicode characters back to unicode name
481 u"""Match unicode characters back to unicode name
481
482
482 This does β˜ƒ -> \\snowman
483 This does β˜ƒ -> \\snowman
483
484
484 Note that snowman is not a valid Python 3 combining character but will be expanded.
485 Note that snowman is not a valid Python 3 combining character but will be expanded.
485 It will not, however, be recombined back to the snowman character by the completion machinery.
486 It will not, however, be recombined back to the snowman character by the completion machinery.
486
487
487 This will also not back-complete standard sequences like \n, \b ...
488 This will also not back-complete standard sequences like \n, \b ...
488
489
489 Used on Python 3 only.
490 Used on Python 3 only.
490 """
491 """
491 if len(text)<2:
492 if len(text)<2:
492 return u'', ()
493 return u'', ()
493 maybe_slash = text[-2]
494 maybe_slash = text[-2]
494 if maybe_slash != '\\':
495 if maybe_slash != '\\':
495 return u'', ()
496 return u'', ()
496
497
497 char = text[-1]
498 char = text[-1]
498 # no expand on quote for completion in strings.
499 # no expand on quote for completion in strings.
499 # nor backcomplete standard ascii keys
500 # nor backcomplete standard ascii keys
500 if char in string.ascii_letters or char in ['"',"'"]:
501 if char in string.ascii_letters or char in ['"',"'"]:
501 return u'', ()
502 return u'', ()
502 try :
503 try :
503 unic = unicodedata.name(char)
504 unic = unicodedata.name(char)
504 return '\\'+char,['\\'+unic]
505 return '\\'+char,['\\'+unic]
505 except KeyError as e:
506 except KeyError as e:
506 pass
507 pass
507 return u'', ()
508 return u'', ()
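For example, on Python 3:

    from IPython.core.completer import back_unicode_name_matches

    print(back_unicode_name_matches(u'\\β˜ƒ'))
    # ('\\β˜ƒ', ['\\SNOWMAN'])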
508
509
509 def back_latex_name_matches(text):
510 def back_latex_name_matches(text):
510 u"""Match latex characters back to unicode name
511 u"""Match latex characters back to unicode name
511
512
512 This does √ -> \\sqrt
513 This does √ -> \\sqrt
513
514
514 Used on Python 3 only.
515 Used on Python 3 only.
515 """
516 """
516 if len(text)<2:
517 if len(text)<2:
517 return u'', ()
518 return u'', ()
518 maybe_slash = text[-2]
519 maybe_slash = text[-2]
519 if maybe_slash != '\\':
520 if maybe_slash != '\\':
520 return u'', ()
521 return u'', ()
521
522
522
523
523 char = text[-1]
524 char = text[-1]
524 # no expand on quote for completion in strings.
525 # no expand on quote for completion in strings.
525 # nor backcomplete standard ascii keys
526 # nor backcomplete standard ascii keys
526 if char in string.ascii_letters or char in ['"',"'"]:
527 if char in string.ascii_letters or char in ['"',"'"]:
527 return u'', ()
528 return u'', ()
528 try :
529 try :
529 latex = reverse_latex_symbol[char]
530 latex = reverse_latex_symbol[char]
530 # '\\' replace the \ as well
531 # '\\' replace the \ as well
531 return '\\'+char,[latex]
532 return '\\'+char,[latex]
532 except KeyError as e:
533 except KeyError as e:
533 pass
534 pass
534 return u'', ()
535 return u'', ()
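And the LaTeX counterpart, again a Python 3 sketch (assumes '\\alpha' is present in IPython's latex_symbols table, as the Greek letters are):

    from IPython.core.completer import back_latex_name_matches

    print(back_latex_name_matches(u'x = \\Ξ±'))
    # ('\\Ξ±', ['\\alpha'])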
535
536
536
537
537 class IPCompleter(Completer):
538 class IPCompleter(Completer):
538 """Extension of the completer class with IPython-specific features"""
539 """Extension of the completer class with IPython-specific features"""
539
540
540 def _greedy_changed(self, name, old, new):
541 def _greedy_changed(self, name, old, new):
541 """update the splitter and readline delims when greedy is changed"""
542 """update the splitter and readline delims when greedy is changed"""
542 if new:
543 if new:
543 self.splitter.delims = GREEDY_DELIMS
544 self.splitter.delims = GREEDY_DELIMS
544 else:
545 else:
545 self.splitter.delims = DELIMS
546 self.splitter.delims = DELIMS
546
547
547 if self.readline:
548 if self.readline:
548 self.readline.set_completer_delims(self.splitter.delims)
549 self.readline.set_completer_delims(self.splitter.delims)
549
550
550 merge_completions = CBool(True, config=True,
551 merge_completions = CBool(True, config=True,
551 help="""Whether to merge completion results into a single list
552 help="""Whether to merge completion results into a single list
552
553
553 If False, only the completion results from the first non-empty
554 If False, only the completion results from the first non-empty
554 completer will be returned.
555 completer will be returned.
555 """
556 """
556 )
557 )
557 omit__names = Enum((0,1,2), default_value=2, config=True,
558 omit__names = Enum((0,1,2), default_value=2, config=True,
558 help="""Instruct the completer to omit private method names
559 help="""Instruct the completer to omit private method names
559
560
560 Specifically, when completing on ``object.<tab>``.
561 Specifically, when completing on ``object.<tab>``.
561
562
562 When 2 [default]: all names that start with '_' will be excluded.
563 When 2 [default]: all names that start with '_' will be excluded.
563
564
564 When 1: all 'magic' names (``__foo__``) will be excluded.
565 When 1: all 'magic' names (``__foo__``) will be excluded.
565
566
566 When 0: nothing will be excluded.
567 When 0: nothing will be excluded.
567 """
568 """
568 )
569 )
569 limit_to__all__ = CBool(default_value=False, config=True,
570 limit_to__all__ = CBool(default_value=False, config=True,
570 help="""Instruct the completer to use __all__ for the completion
571 help="""Instruct the completer to use __all__ for the completion
571
572
572 Specifically, when completing on ``object.<tab>``.
573 Specifically, when completing on ``object.<tab>``.
573
574
574 When True: only those names in obj.__all__ will be included.
575 When True: only those names in obj.__all__ will be included.
575
576
576 When False [default]: the __all__ attribute is ignored
577 When False [default]: the __all__ attribute is ignored
577 """
578 """
578 )
579 )
579
580
580 def __init__(self, shell=None, namespace=None, global_namespace=None,
581 def __init__(self, shell=None, namespace=None, global_namespace=None,
581 use_readline=True, config=None, **kwargs):
582 use_readline=True, config=None, **kwargs):
582 """IPCompleter() -> completer
583 """IPCompleter() -> completer
583
584
584 Return a completer object suitable for use by the readline library
585 Return a completer object suitable for use by the readline library
585 via readline.set_completer().
586 via readline.set_completer().
586
587
587 Inputs:
588 Inputs:
588
589
589 - shell: a pointer to the ipython shell itself. This is needed
590 - shell: a pointer to the ipython shell itself. This is needed
590 because this completer knows about magic functions, and those can
591 because this completer knows about magic functions, and those can
591 only be accessed via the ipython instance.
592 only be accessed via the ipython instance.
592
593
593 - namespace: an optional dict where completions are performed.
594 - namespace: an optional dict where completions are performed.
594
595
595 - global_namespace: secondary optional dict for completions, to
596 - global_namespace: secondary optional dict for completions, to
596 handle cases (such as IPython embedded inside functions) where
597 handle cases (such as IPython embedded inside functions) where
597 both Python scopes are visible.
598 both Python scopes are visible.
598
599
599 use_readline : bool, optional
600 use_readline : bool, optional
600 If true, use the readline library. This completer can still function
601 If true, use the readline library. This completer can still function
601 without readline, though in that case callers must provide some extra
602 without readline, though in that case callers must provide some extra
602 information on each call about the current line."""
603 information on each call about the current line."""
603
604
604 self.magic_escape = ESC_MAGIC
605 self.magic_escape = ESC_MAGIC
605 self.splitter = CompletionSplitter()
606 self.splitter = CompletionSplitter()
606
607
607 # Readline configuration, only used by the rlcompleter method.
608 # Readline configuration, only used by the rlcompleter method.
608 if use_readline:
609 if use_readline:
609 # We store the right version of readline so that later code
610 # We store the right version of readline so that later code
610 import IPython.utils.rlineimpl as readline
611 import IPython.utils.rlineimpl as readline
611 self.readline = readline
612 self.readline = readline
612 else:
613 else:
613 self.readline = None
614 self.readline = None
614
615
615 # _greedy_changed() depends on splitter and readline being defined:
616 # _greedy_changed() depends on splitter and readline being defined:
616 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
617 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
617 config=config, **kwargs)
618 config=config, **kwargs)
618
619
619 # List where completion matches will be stored
620 # List where completion matches will be stored
620 self.matches = []
621 self.matches = []
621 self.shell = shell
622 self.shell = shell
622 # Regexp to split filenames with spaces in them
623 # Regexp to split filenames with spaces in them
623 self.space_name_re = re.compile(r'([^\\] )')
624 self.space_name_re = re.compile(r'([^\\] )')
624 # Hold a local ref. to glob.glob for speed
625 # Hold a local ref. to glob.glob for speed
625 self.glob = glob.glob
626 self.glob = glob.glob
626
627
627 # Determine if we are running on 'dumb' terminals, like (X)Emacs
628 # Determine if we are running on 'dumb' terminals, like (X)Emacs
628 # buffers, to avoid completion problems.
629 # buffers, to avoid completion problems.
629 term = os.environ.get('TERM','xterm')
630 term = os.environ.get('TERM','xterm')
630 self.dumb_terminal = term in ['dumb','emacs']
631 self.dumb_terminal = term in ['dumb','emacs']
631
632
632 # Special handling of backslashes needed in win32 platforms
633 # Special handling of backslashes needed in win32 platforms
633 if sys.platform == "win32":
634 if sys.platform == "win32":
634 self.clean_glob = self._clean_glob_win32
635 self.clean_glob = self._clean_glob_win32
635 else:
636 else:
636 self.clean_glob = self._clean_glob
637 self.clean_glob = self._clean_glob
637
638
638 #regexp to parse docstring for function signature
639 #regexp to parse docstring for function signature
639 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
640 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
640 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
641 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
641 #use this if positional argument name is also needed
642 #use this if positional argument name is also needed
642 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
643 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
643
644
644 # All active matcher routines for completion
645 # All active matcher routines for completion
645 self.matchers = [self.python_matches,
646 self.matchers = [self.python_matches,
646 self.file_matches,
647 self.file_matches,
647 self.magic_matches,
648 self.magic_matches,
648 self.python_func_kw_matches,
649 self.python_func_kw_matches,
649 self.dict_key_matches,
650 self.dict_key_matches,
650 ]
651 ]
651
652
652 def all_completions(self, text):
653 def all_completions(self, text):
653 """
654 """
654 Wrapper around the complete method for the benefit of emacs
655 Wrapper around the complete method for the benefit of emacs
655 and pydb.
656 and pydb.
656 """
657 """
657 return self.complete(text)[1]
658 return self.complete(text)[1]
658
659
659 def _clean_glob(self,text):
660 def _clean_glob(self,text):
660 return self.glob("%s*" % text)
661 return self.glob("%s*" % text)
661
662
662 def _clean_glob_win32(self,text):
663 def _clean_glob_win32(self,text):
663 return [f.replace("\\","/")
664 return [f.replace("\\","/")
664 for f in self.glob("%s*" % text)]
665 for f in self.glob("%s*" % text)]
665
666
666 def file_matches(self, text):
667 def file_matches(self, text):
667 """Match filenames, expanding ~USER type strings.
668 """Match filenames, expanding ~USER type strings.
668
669
669 Most of the seemingly convoluted logic in this completer is an
670 Most of the seemingly convoluted logic in this completer is an
670 attempt to handle filenames with spaces in them. And yet it's not
671 attempt to handle filenames with spaces in them. And yet it's not
671 quite perfect, because Python's readline doesn't expose all of the
672 quite perfect, because Python's readline doesn't expose all of the
672 GNU readline details needed for this to be done correctly.
673 GNU readline details needed for this to be done correctly.
673
674
674 For a filename with a space in it, the printed completions will be
675 For a filename with a space in it, the printed completions will be
675 only the parts after what's already been typed (instead of the
676 only the parts after what's already been typed (instead of the
676 full completions, as is normally done). I don't think with the
677 full completions, as is normally done). I don't think with the
677 current (as of Python 2.3) Python readline it's possible to do
678 current (as of Python 2.3) Python readline it's possible to do
678 better."""
679 better."""
679
680
680 #io.rprint('Completer->file_matches: <%r>' % text) # dbg
681 #io.rprint('Completer->file_matches: <%r>' % text) # dbg
681
682
682 # chars that require escaping with backslash - i.e. chars
683 # chars that require escaping with backslash - i.e. chars
683 # that readline treats incorrectly as delimiters, but we
684 # that readline treats incorrectly as delimiters, but we
684 # don't want to treat as delimiters in filename matching
685 # don't want to treat as delimiters in filename matching
685 # when escaped with backslash
686 # when escaped with backslash
686 if text.startswith('!'):
687 if text.startswith('!'):
687 text = text[1:]
688 text = text[1:]
688 text_prefix = '!'
689 text_prefix = '!'
689 else:
690 else:
690 text_prefix = ''
691 text_prefix = ''
691
692
692 text_until_cursor = self.text_until_cursor
693 text_until_cursor = self.text_until_cursor
693 # track strings with open quotes
694 # track strings with open quotes
694 open_quotes = has_open_quotes(text_until_cursor)
695 open_quotes = has_open_quotes(text_until_cursor)
695
696
696 if '(' in text_until_cursor or '[' in text_until_cursor:
697 if '(' in text_until_cursor or '[' in text_until_cursor:
697 lsplit = text
698 lsplit = text
698 else:
699 else:
699 try:
700 try:
700 # arg_split ~ shlex.split, but with unicode bugs fixed by us
701 # arg_split ~ shlex.split, but with unicode bugs fixed by us
701 lsplit = arg_split(text_until_cursor)[-1]
702 lsplit = arg_split(text_until_cursor)[-1]
702 except ValueError:
703 except ValueError:
703 # typically an unmatched ", or backslash without escaped char.
704 # typically an unmatched ", or backslash without escaped char.
704 if open_quotes:
705 if open_quotes:
705 lsplit = text_until_cursor.split(open_quotes)[-1]
706 lsplit = text_until_cursor.split(open_quotes)[-1]
706 else:
707 else:
707 return []
708 return []
708 except IndexError:
709 except IndexError:
709 # tab pressed on empty line
710 # tab pressed on empty line
710 lsplit = ""
711 lsplit = ""
711
712
712 if not open_quotes and lsplit != protect_filename(lsplit):
713 if not open_quotes and lsplit != protect_filename(lsplit):
713 # if protectables are found, do matching on the whole escaped name
714 # if protectables are found, do matching on the whole escaped name
714 has_protectables = True
715 has_protectables = True
715 text0,text = text,lsplit
716 text0,text = text,lsplit
716 else:
717 else:
717 has_protectables = False
718 has_protectables = False
718 text = os.path.expanduser(text)
719 text = os.path.expanduser(text)
719
720
720 if text == "":
721 if text == "":
721 return [text_prefix + protect_filename(f) for f in self.glob("*")]
722 return [text_prefix + protect_filename(f) for f in self.glob("*")]
722
723
723 # Compute the matches from the filesystem
724 # Compute the matches from the filesystem
724 m0 = self.clean_glob(text.replace('\\',''))
725 m0 = self.clean_glob(text.replace('\\',''))
725
726
726 if has_protectables:
727 if has_protectables:
727 # If we had protectables, we need to revert our changes to the
728 # If we had protectables, we need to revert our changes to the
728 # beginning of filename so that we don't double-write the part
729 # beginning of filename so that we don't double-write the part
729 # of the filename we have so far
730 # of the filename we have so far
730 len_lsplit = len(lsplit)
731 len_lsplit = len(lsplit)
731 matches = [text_prefix + text0 +
732 matches = [text_prefix + text0 +
732 protect_filename(f[len_lsplit:]) for f in m0]
733 protect_filename(f[len_lsplit:]) for f in m0]
733 else:
734 else:
734 if open_quotes:
735 if open_quotes:
735 # if we have a string with an open quote, we don't need to
736 # if we have a string with an open quote, we don't need to
736 # protect the names at all (and we _shouldn't_, as it
737 # protect the names at all (and we _shouldn't_, as it
737 # would cause bugs when the filesystem call is made).
738 # would cause bugs when the filesystem call is made).
738 matches = m0
739 matches = m0
739 else:
740 else:
740 matches = [text_prefix +
741 matches = [text_prefix +
741 protect_filename(f) for f in m0]
742 protect_filename(f) for f in m0]
742
743
743 #io.rprint('mm', matches) # dbg
744 #io.rprint('mm', matches) # dbg
744
745
745 # Mark directories in input list by appending '/' to their names.
746 # Mark directories in input list by appending '/' to their names.
746 matches = [x+'/' if os.path.isdir(x) else x for x in matches]
747 matches = [x+'/' if os.path.isdir(x) else x for x in matches]
747 return matches
748 return matches
748
749
749 def magic_matches(self, text):
750 def magic_matches(self, text):
750 """Match magics"""
751 """Match magics"""
751 #print 'Completer->magic_matches:',text,'lb',self.text_until_cursor # dbg
752 #print 'Completer->magic_matches:',text,'lb',self.text_until_cursor # dbg
752 # Get all shell magics now rather than statically, so magics loaded at
753 # Get all shell magics now rather than statically, so magics loaded at
753 # runtime show up too.
754 # runtime show up too.
754 lsm = self.shell.magics_manager.lsmagic()
755 lsm = self.shell.magics_manager.lsmagic()
755 line_magics = lsm['line']
756 line_magics = lsm['line']
756 cell_magics = lsm['cell']
757 cell_magics = lsm['cell']
757 pre = self.magic_escape
758 pre = self.magic_escape
758 pre2 = pre+pre
759 pre2 = pre+pre
759
760
760 # Completion logic:
761 # Completion logic:
761 # - user gives %%: only do cell magics
762 # - user gives %%: only do cell magics
762 # - user gives %: do both line and cell magics
763 # - user gives %: do both line and cell magics
763 # - no prefix: do both
764 # - no prefix: do both
764 # In other words, line magics are skipped if the user gives %% explicitly
765 # In other words, line magics are skipped if the user gives %% explicitly
765 bare_text = text.lstrip(pre)
766 bare_text = text.lstrip(pre)
766 comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
767 comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
767 if not text.startswith(pre2):
768 if not text.startswith(pre2):
768 comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
769 comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
769 return comp
770 return comp
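A sketch of the prefix logic spelled out in the comments above, run from a live IPython session (get_ipython() and the standard %time/%timeit magics are assumed to be available):

    ip = get_ipython()

    # '%' prefix: both line and cell magics are offered.
    print(ip.Completer.magic_matches('%timei'))
    # e.g. ['%%timeit', '%timeit']

    # '%%' prefix: only cell magics are offered.
    print(ip.Completer.magic_matches('%%timei'))
    # e.g. ['%%timeit']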
770
771
771 def python_matches(self,text):
772 def python_matches(self,text):
772 """Match attributes or global python names"""
773 """Match attributes or global python names"""
773
774
774 #io.rprint('Completer->python_matches, txt=%r' % text) # dbg
775 #io.rprint('Completer->python_matches, txt=%r' % text) # dbg
775 if "." in text:
776 if "." in text:
776 try:
777 try:
777 matches = self.attr_matches(text)
778 matches = self.attr_matches(text)
778 if text.endswith('.') and self.omit__names:
779 if text.endswith('.') and self.omit__names:
779 if self.omit__names == 1:
780 if self.omit__names == 1:
780 # true if txt is _not_ a __ name, false otherwise:
781 # true if txt is _not_ a __ name, false otherwise:
781 no__name = (lambda txt:
782 no__name = (lambda txt:
782 re.match(r'.*\.__.*?__',txt) is None)
783 re.match(r'.*\.__.*?__',txt) is None)
783 else:
784 else:
784 # true if txt is _not_ a _ name, false otherwise:
785 # true if txt is _not_ a _ name, false otherwise:
785 no__name = (lambda txt:
786 no__name = (lambda txt:
786 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
787 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
787 matches = filter(no__name, matches)
788 matches = filter(no__name, matches)
788 except NameError:
789 except NameError:
789 # catches <undefined attributes>.<tab>
790 # catches <undefined attributes>.<tab>
790 matches = []
791 matches = []
791 else:
792 else:
792 matches = self.global_matches(text)
793 matches = self.global_matches(text)
793
794
794 return matches
795 return matches
795
796
796 def _default_arguments_from_docstring(self, doc):
797 def _default_arguments_from_docstring(self, doc):
797 """Parse the first line of docstring for call signature.
798 """Parse the first line of docstring for call signature.
798
799
799 Docstring should be of the form 'min(iterable[, key=func])\n'.
800 Docstring should be of the form 'min(iterable[, key=func])\n'.
800 It can also parse Cython docstrings of the form
801 It can also parse Cython docstrings of the form
801 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
802 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
802 """
803 """
803 if doc is None:
804 if doc is None:
804 return []
805 return []
805
806
806 # care only about the first line
807 # care only about the first line
807 line = doc.lstrip().splitlines()[0]
808 line = doc.lstrip().splitlines()[0]
808
809
809 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
810 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
810 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
811 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
811 sig = self.docstring_sig_re.search(line)
812 sig = self.docstring_sig_re.search(line)
812 if sig is None:
813 if sig is None:
813 return []
814 return []
814 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
815 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
815 sig = sig.groups()[0].split(',')
816 sig = sig.groups()[0].split(',')
816 ret = []
817 ret = []
817 for s in sig:
818 for s in sig:
818 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
819 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
819 ret += self.docstring_kwd_re.findall(s)
820 ret += self.docstring_kwd_re.findall(s)
820 return ret
821 return ret
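A standalone sketch of the two regexes (copied from __init__ above) performing the parse described in the docstring:

    import re

    docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
    docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')

    line = 'min(iterable[, key=func])'
    sig = docstring_sig_re.search(line).groups()[0]   # 'iterable[, key=func'
    kwargs = [kw for part in sig.split(',')
              for kw in docstring_kwd_re.findall(part)]
    print(kwargs)   # ['key']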
821
822
822 def _default_arguments(self, obj):
823 def _default_arguments(self, obj):
823 """Return the list of default arguments of obj if it is callable,
824 """Return the list of default arguments of obj if it is callable,
824 or empty list otherwise."""
825 or empty list otherwise."""
825 call_obj = obj
826 call_obj = obj
826 ret = []
827 ret = []
827 if inspect.isbuiltin(obj):
828 if inspect.isbuiltin(obj):
828 pass
829 pass
829 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
830 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
830 if inspect.isclass(obj):
831 if inspect.isclass(obj):
831 # for Cython embedsignature=True the constructor docstring
832 # for Cython embedsignature=True the constructor docstring
832 # belongs to the object itself, not __init__
833 # belongs to the object itself, not __init__
833 ret += self._default_arguments_from_docstring(
834 ret += self._default_arguments_from_docstring(
834 getattr(obj, '__doc__', ''))
835 getattr(obj, '__doc__', ''))
835 # for classes, check for __init__,__new__
836 # for classes, check for __init__,__new__
836 call_obj = (getattr(obj, '__init__', None) or
837 call_obj = (getattr(obj, '__init__', None) or
837 getattr(obj, '__new__', None))
838 getattr(obj, '__new__', None))
838 # for all others, check if they are __call__able
839 # for all others, check if they are __call__able
839 elif hasattr(obj, '__call__'):
840 elif hasattr(obj, '__call__'):
840 call_obj = obj.__call__
841 call_obj = obj.__call__
841
842
842 ret += self._default_arguments_from_docstring(
843 ret += self._default_arguments_from_docstring(
843 getattr(call_obj, '__doc__', ''))
844 getattr(call_obj, '__doc__', ''))
844
845
845 try:
846 try:
846 args,_,_1,defaults = inspect.getargspec(call_obj)
847 args,_,_1,defaults = inspect.getargspec(call_obj)
847 if defaults:
848 if defaults:
848 ret+=args[-len(defaults):]
849 ret+=args[-len(defaults):]
849 except TypeError:
850 except TypeError:
850 pass
851 pass
851
852
852 return list(set(ret))
853 return list(set(ret))
853
854
854 def python_func_kw_matches(self,text):
855 def python_func_kw_matches(self,text):
855 """Match named parameters (kwargs) of the last open function"""
856 """Match named parameters (kwargs) of the last open function"""
856
857
857 if "." in text: # a parameter cannot be dotted
858 if "." in text: # a parameter cannot be dotted
858 return []
859 return []
859 try: regexp = self.__funcParamsRegex
860 try: regexp = self.__funcParamsRegex
860 except AttributeError:
861 except AttributeError:
861 regexp = self.__funcParamsRegex = re.compile(r'''
862 regexp = self.__funcParamsRegex = re.compile(r'''
862 '.*?(?<!\\)' | # single quoted strings or
863 '.*?(?<!\\)' | # single quoted strings or
863 ".*?(?<!\\)" | # double quoted strings or
864 ".*?(?<!\\)" | # double quoted strings or
864 \w+ | # identifier
865 \w+ | # identifier
865 \S # other characters
866 \S # other characters
866 ''', re.VERBOSE | re.DOTALL)
867 ''', re.VERBOSE | re.DOTALL)
867 # 1. find the nearest identifier that comes before an unclosed
868 # 1. find the nearest identifier that comes before an unclosed
868 # parenthesis before the cursor
869 # parenthesis before the cursor
869 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
870 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
870 tokens = regexp.findall(self.text_until_cursor)
871 tokens = regexp.findall(self.text_until_cursor)
871 tokens.reverse()
872 tokens.reverse()
872 iterTokens = iter(tokens); openPar = 0
873 iterTokens = iter(tokens); openPar = 0
873
874
874 for token in iterTokens:
875 for token in iterTokens:
875 if token == ')':
876 if token == ')':
876 openPar -= 1
877 openPar -= 1
877 elif token == '(':
878 elif token == '(':
878 openPar += 1
879 openPar += 1
879 if openPar > 0:
880 if openPar > 0:
880 # found the last unclosed parenthesis
881 # found the last unclosed parenthesis
881 break
882 break
882 else:
883 else:
883 return []
884 return []
884 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
885 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
885 ids = []
886 ids = []
886 isId = re.compile(r'\w+$').match
887 isId = re.compile(r'\w+$').match
887
888
888 while True:
889 while True:
889 try:
890 try:
890 ids.append(next(iterTokens))
891 ids.append(next(iterTokens))
891 if not isId(ids[-1]):
892 if not isId(ids[-1]):
892 ids.pop(); break
893 ids.pop(); break
893 if not next(iterTokens) == '.':
894 if not next(iterTokens) == '.':
894 break
895 break
895 except StopIteration:
896 except StopIteration:
896 break
897 break
897 # lookup the candidate callable matches either using global_matches
898 # lookup the candidate callable matches either using global_matches
898 # or attr_matches for dotted names
899 # or attr_matches for dotted names
899 if len(ids) == 1:
900 if len(ids) == 1:
900 callableMatches = self.global_matches(ids[0])
901 callableMatches = self.global_matches(ids[0])
901 else:
902 else:
902 callableMatches = self.attr_matches('.'.join(ids[::-1]))
903 callableMatches = self.attr_matches('.'.join(ids[::-1]))
903 argMatches = []
904 argMatches = []
904 for callableMatch in callableMatches:
905 for callableMatch in callableMatches:
905 try:
906 try:
906 namedArgs = self._default_arguments(eval(callableMatch,
907 namedArgs = self._default_arguments(eval(callableMatch,
907 self.namespace))
908 self.namespace))
908 except:
909 except:
909 continue
910 continue
910
911
911 for namedArg in namedArgs:
912 for namedArg in namedArgs:
912 if namedArg.startswith(text):
913 if namedArg.startswith(text):
913 argMatches.append("%s=" %namedArg)
914 argMatches.append("%s=" %namedArg)
914 return argMatches
915 return argMatches
915
916
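
In practice python_func_kw_matches surfaces like this: after defining a function with keyword arguments, completing inside an open call offers the name= forms. A small sketch against the completer API, assuming a running IPython session (myfunc and its arguments are made up for illustration):

from IPython import get_ipython

ip = get_ipython()               # only available inside a running IPython session
ip.ex('def myfunc(alpha=1, beta=2): return alpha + beta')
text, matches = ip.Completer.complete(None, 'myfunc(al')
# the candidates should include 'alpha=' alongside ordinary name matches
print([m for m in matches if m.endswith('=')])
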
916 def dict_key_matches(self, text):
917 def dict_key_matches(self, text):
917 "Match string keys in a dictionary, after e.g. 'foo[' "
918 "Match string keys in a dictionary, after e.g. 'foo[' "
918 def get_keys(obj):
919 def get_keys(obj):
919 # Only allow completion for known in-memory dict-like types
920 # Only allow completion for known in-memory dict-like types
920 if isinstance(obj, dict) or\
921 if isinstance(obj, dict) or\
921 _safe_isinstance(obj, 'pandas', 'DataFrame'):
922 _safe_isinstance(obj, 'pandas', 'DataFrame'):
922 try:
923 try:
923 return list(obj.keys())
924 return list(obj.keys())
924 except Exception:
925 except Exception:
925 return []
926 return []
926 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
927 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
927 _safe_isinstance(obj, 'numpy', 'void'):
928 _safe_isinstance(obj, 'numpy', 'void'):
928 return obj.dtype.names or []
929 return obj.dtype.names or []
929 return []
930 return []
930
931
931 try:
932 try:
932 regexps = self.__dict_key_regexps
933 regexps = self.__dict_key_regexps
933 except AttributeError:
934 except AttributeError:
934 dict_key_re_fmt = r'''(?x)
935 dict_key_re_fmt = r'''(?x)
935 ( # match dict-referring expression wrt greedy setting
936 ( # match dict-referring expression wrt greedy setting
936 %s
937 %s
937 )
938 )
938 \[ # open bracket
939 \[ # open bracket
939 \s* # and optional whitespace
940 \s* # and optional whitespace
940 ([uUbB]? # string prefix (r not handled)
941 ([uUbB]? # string prefix (r not handled)
941 (?: # unclosed string
942 (?: # unclosed string
942 '(?:[^']|(?<!\\)\\')*
943 '(?:[^']|(?<!\\)\\')*
943 |
944 |
944 "(?:[^"]|(?<!\\)\\")*
945 "(?:[^"]|(?<!\\)\\")*
945 )
946 )
946 )?
947 )?
947 $
948 $
948 '''
949 '''
949 regexps = self.__dict_key_regexps = {
950 regexps = self.__dict_key_regexps = {
950 False: re.compile(dict_key_re_fmt % '''
951 False: re.compile(dict_key_re_fmt % '''
951 # identifiers separated by .
952 # identifiers separated by .
952 (?!\d)\w+
953 (?!\d)\w+
953 (?:\.(?!\d)\w+)*
954 (?:\.(?!\d)\w+)*
954 '''),
955 '''),
955 True: re.compile(dict_key_re_fmt % '''
956 True: re.compile(dict_key_re_fmt % '''
956 .+
957 .+
957 ''')
958 ''')
958 }
959 }
959
960
960 match = regexps[self.greedy].search(self.text_until_cursor)
961 match = regexps[self.greedy].search(self.text_until_cursor)
961 if match is None:
962 if match is None:
962 return []
963 return []
963
964
964 expr, prefix = match.groups()
965 expr, prefix = match.groups()
965 try:
966 try:
966 obj = eval(expr, self.namespace)
967 obj = eval(expr, self.namespace)
967 except Exception:
968 except Exception:
968 try:
969 try:
969 obj = eval(expr, self.global_namespace)
970 obj = eval(expr, self.global_namespace)
970 except Exception:
971 except Exception:
971 return []
972 return []
972
973
973 keys = get_keys(obj)
974 keys = get_keys(obj)
974 if not keys:
975 if not keys:
975 return keys
976 return keys
976 closing_quote, token_offset, matches = match_dict_keys(keys, prefix)
977 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
977 if not matches:
978 if not matches:
978 return matches
979 return matches
979
980
980 # get the cursor position of
981 # get the cursor position of
981 # - the text being completed
982 # - the text being completed
982 # - the start of the key text
983 # - the start of the key text
983 # - the start of the completion
984 # - the start of the completion
984 text_start = len(self.text_until_cursor) - len(text)
985 text_start = len(self.text_until_cursor) - len(text)
985 if prefix:
986 if prefix:
986 key_start = match.start(2)
987 key_start = match.start(2)
987 completion_start = key_start + token_offset
988 completion_start = key_start + token_offset
988 else:
989 else:
989 key_start = completion_start = match.end()
990 key_start = completion_start = match.end()
990
991
991 # grab the leading prefix, to make sure all completions start with `text`
992 # grab the leading prefix, to make sure all completions start with `text`
992 if text_start > key_start:
993 if text_start > key_start:
993 leading = ''
994 leading = ''
994 else:
995 else:
995 leading = text[text_start:completion_start]
996 leading = text[text_start:completion_start]
996
997
997 # the index of the `[` character
998 # the index of the `[` character
998 bracket_idx = match.end(1)
999 bracket_idx = match.end(1)
999
1000
1000 # append closing quote and bracket as appropriate
1001 # append closing quote and bracket as appropriate
1001 # this is *not* appropriate if the opening quote or bracket is outside
1002 # this is *not* appropriate if the opening quote or bracket is outside
1002 # the text given to this method
1003 # the text given to this method
1003 suf = ''
1004 suf = ''
1004 continuation = self.line_buffer[len(self.text_until_cursor):]
1005 continuation = self.line_buffer[len(self.text_until_cursor):]
1005 if key_start > text_start and closing_quote:
1006 if key_start > text_start and closing_quote:
1006 # quotes were opened inside text, maybe close them
1007 # quotes were opened inside text, maybe close them
1007 if continuation.startswith(closing_quote):
1008 if continuation.startswith(closing_quote):
1008 continuation = continuation[len(closing_quote):]
1009 continuation = continuation[len(closing_quote):]
1009 else:
1010 else:
1010 suf += closing_quote
1011 suf += closing_quote
1011 if bracket_idx > text_start:
1012 if bracket_idx > text_start:
1012 # brackets were opened inside text, maybe close them
1013 # brackets were opened inside text, maybe close them
1013 if not continuation.startswith(']'):
1014 if not continuation.startswith(']'):
1014 suf += ']'
1015 suf += ']'
1015
1016
1016 return [leading + k + suf for k in matches]
1017 return [leading + k + suf for k in matches]
1017
1018
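
A sketch of what dict_key_matches produces, mirroring the tests later in this changeset: with a dict in the user namespace, completing after d[ offers quoted keys, and because match_dict_keys now receives self.splitter.delims, keys containing characters such as '-' are matched whole. Assumes a running IPython session; d and its keys are illustrative:

from IPython import get_ipython

ip = get_ipython()
ip.user_ns['d'] = {'abc': None, 'before-after': None}

# bare bracket: keys are offered with quotes added
print(ip.Completer.complete(line_buffer="d[")[1])     # e.g. ["'abc'", "'before-after'"]

# inside an already-opened string the quote is not repeated
print(ip.Completer.complete(line_buffer="d['ab")[1])  # e.g. ['abc']
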
1018 def unicode_name_matches(self, text):
1019 def unicode_name_matches(self, text):
1019 u"""Match Latex-like syntax for unicode characters base
1020 u"""Match Latex-like syntax for unicode characters base
1020 on the name of the character.
1021 on the name of the character.
1021
1022
1022 This does \\GREEK SMALL LETTER ETA -> η
1023 This does \\GREEK SMALL LETTER ETA -> η
1023
1024
1024 Works only on valid Python 3 identifiers, or on combining characters that
1025 Works only on valid Python 3 identifiers, or on combining characters that
1025 will combine to form a valid identifier.
1026 will combine to form a valid identifier.
1026
1027
1027 Used on Python 3 only.
1028 Used on Python 3 only.
1028 """
1029 """
1029 slashpos = text.rfind('\\')
1030 slashpos = text.rfind('\\')
1030 if slashpos > -1:
1031 if slashpos > -1:
1031 s = text[slashpos+1:]
1032 s = text[slashpos+1:]
1032 try :
1033 try :
1033 unic = unicodedata.lookup(s)
1034 unic = unicodedata.lookup(s)
1034 # allow combining chars
1035 # allow combining chars
1035 if ('a'+unic).isidentifier():
1036 if ('a'+unic).isidentifier():
1036 return '\\'+s,[unic]
1037 return '\\'+s,[unic]
1037 except KeyError as e:
1038 except KeyError as e:
1038 pass
1039 pass
1039 return u'', []
1040 return u'', []
1040
1041
1041
1042
1042
1043
1043
1044
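
For reference, a sketch of the behaviour the docstring above describes, driven through the shell's complete() entry point (Python 3, inside a running IPython session):

from IPython import get_ipython

ip = get_ipython()
text, matches = ip.complete('\\GREEK SMALL LETTER ETA')
# expected: text == '\\GREEK SMALL LETTER ETA' and matches == ['η']
print(text, matches)
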
1044 def latex_matches(self, text):
1045 def latex_matches(self, text):
1045 u"""Match Latex syntax for unicode characters.
1046 u"""Match Latex syntax for unicode characters.
1046
1047
1047 This does both \\alp -> \\alpha and \\alpha -> α
1048 This does both \\alp -> \\alpha and \\alpha -> α
1048
1049
1049 Used on Python 3 only.
1050 Used on Python 3 only.
1050 """
1051 """
1051 slashpos = text.rfind('\\')
1052 slashpos = text.rfind('\\')
1052 if slashpos > -1:
1053 if slashpos > -1:
1053 s = text[slashpos:]
1054 s = text[slashpos:]
1054 if s in latex_symbols:
1055 if s in latex_symbols:
1055 # Try to complete a full latex symbol to unicode
1056 # Try to complete a full latex symbol to unicode
1056 # \\alpha -> α
1057 # \\alpha -> α
1057 return s, [latex_symbols[s]]
1058 return s, [latex_symbols[s]]
1058 else:
1059 else:
1059 # If a user has partially typed a latex symbol, give them
1060 # If a user has partially typed a latex symbol, give them
1060 # a full list of options \al -> [\aleph, \alpha]
1061 # a full list of options \al -> [\aleph, \alpha]
1061 matches = [k for k in latex_symbols if k.startswith(s)]
1062 matches = [k for k in latex_symbols if k.startswith(s)]
1062 return s, matches
1063 return s, matches
1063 return u'', []
1064 return u'', []
1064
1065
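
And the LaTeX-style counterpart, again through complete(); this mirrors test_latex_completions in the test module below:

from IPython import get_ipython

ip = get_ipython()
print(ip.complete(u'\\alpha')[1])   # ['α']: a full symbol is converted
print(ip.complete(u'\\al')[1])      # ['\\aleph', '\\alpha', ...]: a partial symbol lists options
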
1065 def dispatch_custom_completer(self, text):
1066 def dispatch_custom_completer(self, text):
1066 #io.rprint("Custom! '%s' %s" % (text, self.custom_completers)) # dbg
1067 #io.rprint("Custom! '%s' %s" % (text, self.custom_completers)) # dbg
1067 line = self.line_buffer
1068 line = self.line_buffer
1068 if not line.strip():
1069 if not line.strip():
1069 return None
1070 return None
1070
1071
1071 # Create a little structure to pass all the relevant information about
1072 # Create a little structure to pass all the relevant information about
1072 # the current completion to any custom completer.
1073 # the current completion to any custom completer.
1073 event = Bunch()
1074 event = Bunch()
1074 event.line = line
1075 event.line = line
1075 event.symbol = text
1076 event.symbol = text
1076 cmd = line.split(None,1)[0]
1077 cmd = line.split(None,1)[0]
1077 event.command = cmd
1078 event.command = cmd
1078 event.text_until_cursor = self.text_until_cursor
1079 event.text_until_cursor = self.text_until_cursor
1079
1080
1080 #print "\ncustom:{%s]\n" % event # dbg
1081 #print "\ncustom:{%s]\n" % event # dbg
1081
1082
1082 # for foo etc, try also to find completer for %foo
1083 # for foo etc, try also to find completer for %foo
1083 if not cmd.startswith(self.magic_escape):
1084 if not cmd.startswith(self.magic_escape):
1084 try_magic = self.custom_completers.s_matches(
1085 try_magic = self.custom_completers.s_matches(
1085 self.magic_escape + cmd)
1086 self.magic_escape + cmd)
1086 else:
1087 else:
1087 try_magic = []
1088 try_magic = []
1088
1089
1089 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1090 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1090 try_magic,
1091 try_magic,
1091 self.custom_completers.flat_matches(self.text_until_cursor)):
1092 self.custom_completers.flat_matches(self.text_until_cursor)):
1092 #print "try",c # dbg
1093 #print "try",c # dbg
1093 try:
1094 try:
1094 res = c(event)
1095 res = c(event)
1095 if res:
1096 if res:
1096 # first, try case sensitive match
1097 # first, try case sensitive match
1097 withcase = [r for r in res if r.startswith(text)]
1098 withcase = [r for r in res if r.startswith(text)]
1098 if withcase:
1099 if withcase:
1099 return withcase
1100 return withcase
1100 # if none, then case insensitive ones are ok too
1101 # if none, then case insensitive ones are ok too
1101 text_low = text.lower()
1102 text_low = text.lower()
1102 return [r for r in res if r.lower().startswith(text_low)]
1103 return [r for r in res if r.lower().startswith(text_low)]
1103 except TryNext:
1104 except TryNext:
1104 pass
1105 pass
1105
1106
1106 return None
1107 return None
1107
1108
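
The Bunch-built event above is what a custom completer receives. A hedged sketch of registering one through IPython's 'complete_command' hook (the %mycmd command and the returned names are made up; only the set_hook call is the standard mechanism):

from IPython import get_ipython

def mycmd_completer(self, event):
    """Return completion candidates for lines starting with %mycmd."""
    # event.line, event.symbol, event.command and event.text_until_cursor
    # carry the fields assembled in dispatch_custom_completer above.
    return ['alpha', 'beta', 'gamma']

ip = get_ipython()
ip.set_hook('complete_command', mycmd_completer, str_key='%mycmd')
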
1108 def complete(self, text=None, line_buffer=None, cursor_pos=None):
1109 def complete(self, text=None, line_buffer=None, cursor_pos=None):
1109 """Find completions for the given text and line context.
1110 """Find completions for the given text and line context.
1110
1111
1111 Note that both the text and the line_buffer are optional, but at least
1112 Note that both the text and the line_buffer are optional, but at least
1112 one of them must be given.
1113 one of them must be given.
1113
1114
1114 Parameters
1115 Parameters
1115 ----------
1116 ----------
1116 text : string, optional
1117 text : string, optional
1117 Text to perform the completion on. If not given, the line buffer
1118 Text to perform the completion on. If not given, the line buffer
1118 is split using the instance's CompletionSplitter object.
1119 is split using the instance's CompletionSplitter object.
1119
1120
1120 line_buffer : string, optional
1121 line_buffer : string, optional
1121 If not given, the completer attempts to obtain the current line
1122 If not given, the completer attempts to obtain the current line
1122 buffer via readline. This keyword allows clients which are
1123 buffer via readline. This keyword allows clients which are
1123 requesting text completions in non-readline contexts to inform
1124 requesting text completions in non-readline contexts to inform
1124 the completer of the entire text.
1125 the completer of the entire text.
1125
1126
1126 cursor_pos : int, optional
1127 cursor_pos : int, optional
1127 Index of the cursor in the full line buffer. Should be provided by
1128 Index of the cursor in the full line buffer. Should be provided by
1128 remote frontends where kernel has no access to frontend state.
1129 remote frontends where kernel has no access to frontend state.
1129
1130
1130 Returns
1131 Returns
1131 -------
1132 -------
1132 text : str
1133 text : str
1133 Text that was actually used in the completion.
1134 Text that was actually used in the completion.
1134
1135
1135 matches : list
1136 matches : list
1136 A list of completion matches.
1137 A list of completion matches.
1137 """
1138 """
1138 # io.rprint('\nCOMP1 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1139 # io.rprint('\nCOMP1 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1139
1140
1140 # if the cursor position isn't given, the only sane assumption we can
1141 # if the cursor position isn't given, the only sane assumption we can
1141 # make is that it's at the end of the line (the common case)
1142 # make is that it's at the end of the line (the common case)
1142 if cursor_pos is None:
1143 if cursor_pos is None:
1143 cursor_pos = len(line_buffer) if text is None else len(text)
1144 cursor_pos = len(line_buffer) if text is None else len(text)
1144
1145
1145 if PY3:
1146 if PY3:
1146
1147
1147 base_text = text if not line_buffer else line_buffer[:cursor_pos]
1148 base_text = text if not line_buffer else line_buffer[:cursor_pos]
1148 latex_text, latex_matches = self.latex_matches(base_text)
1149 latex_text, latex_matches = self.latex_matches(base_text)
1149 if latex_matches:
1150 if latex_matches:
1150 return latex_text, latex_matches
1151 return latex_text, latex_matches
1151 name_text = ''
1152 name_text = ''
1152 name_matches = []
1153 name_matches = []
1153 for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
1154 for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
1154 name_text, name_matches = meth(base_text)
1155 name_text, name_matches = meth(base_text)
1155 if name_text:
1156 if name_text:
1156 return name_text, name_matches
1157 return name_text, name_matches
1157
1158
1158 # if text is either None or an empty string, rely on the line buffer
1159 # if text is either None or an empty string, rely on the line buffer
1159 if not text:
1160 if not text:
1160 text = self.splitter.split_line(line_buffer, cursor_pos)
1161 text = self.splitter.split_line(line_buffer, cursor_pos)
1161
1162
1162 # If no line buffer is given, assume the input text is all there was
1163 # If no line buffer is given, assume the input text is all there was
1163 if line_buffer is None:
1164 if line_buffer is None:
1164 line_buffer = text
1165 line_buffer = text
1165
1166
1166 self.line_buffer = line_buffer
1167 self.line_buffer = line_buffer
1167 self.text_until_cursor = self.line_buffer[:cursor_pos]
1168 self.text_until_cursor = self.line_buffer[:cursor_pos]
1168 # io.rprint('COMP2 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1169 # io.rprint('COMP2 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1169
1170
1170 # Start with a clean slate of completions
1171 # Start with a clean slate of completions
1171 self.matches[:] = []
1172 self.matches[:] = []
1172 custom_res = self.dispatch_custom_completer(text)
1173 custom_res = self.dispatch_custom_completer(text)
1173 if custom_res is not None:
1174 if custom_res is not None:
1174 # did custom completers produce something?
1175 # did custom completers produce something?
1175 self.matches = custom_res
1176 self.matches = custom_res
1176 else:
1177 else:
1177 # Extend the list of completions with the results of each
1178 # Extend the list of completions with the results of each
1178 # matcher, so we return results to the user from all
1179 # matcher, so we return results to the user from all
1179 # namespaces.
1180 # namespaces.
1180 if self.merge_completions:
1181 if self.merge_completions:
1181 self.matches = []
1182 self.matches = []
1182 for matcher in self.matchers:
1183 for matcher in self.matchers:
1183 try:
1184 try:
1184 self.matches.extend(matcher(text))
1185 self.matches.extend(matcher(text))
1185 except:
1186 except:
1186 # Show the ugly traceback if the matcher causes an
1187 # Show the ugly traceback if the matcher causes an
1187 # exception, but do NOT crash the kernel!
1188 # exception, but do NOT crash the kernel!
1188 sys.excepthook(*sys.exc_info())
1189 sys.excepthook(*sys.exc_info())
1189 else:
1190 else:
1190 for matcher in self.matchers:
1191 for matcher in self.matchers:
1191 self.matches = matcher(text)
1192 self.matches = matcher(text)
1192 if self.matches:
1193 if self.matches:
1193 break
1194 break
1194 # FIXME: we should extend our api to return a dict with completions for
1195 # FIXME: we should extend our api to return a dict with completions for
1195 # different types of objects. The rlcomplete() method could then
1196 # different types of objects. The rlcomplete() method could then
1196 # simply collapse the dict into a list for readline, but we'd have
1197 # simply collapse the dict into a list for readline, but we'd have
1197 # richer completion semantics in other environments.
1198 # richer completion semantics in other environments.
1198
1199
1199 # use penalize_magics_key to put magics after variables with same name
1200 # use penalize_magics_key to put magics after variables with same name
1200 self.matches = sorted(set(self.matches), key=penalize_magics_key)
1201 self.matches = sorted(set(self.matches), key=penalize_magics_key)
1201
1202
1202 #io.rprint('COMP TEXT, MATCHES: %r, %r' % (text, self.matches)) # dbg
1203 #io.rprint('COMP TEXT, MATCHES: %r, %r' % (text, self.matches)) # dbg
1203 return text, self.matches
1204 return text, self.matches
1204
1205
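
A quick sketch of calling complete() directly with an explicit line buffer and cursor position, the pattern remote frontends use as noted in the docstring (the output shown in comments is illustrative):

from IPython import get_ipython

ip = get_ipython()
line = 'import sys; sys.pa'
text, matches = ip.Completer.complete(line_buffer=line, cursor_pos=len(line))
print(text)      # 'sys.pa'  (derived via the CompletionSplitter)
print(matches)   # e.g. ['sys.path', 'sys.path_hooks', 'sys.path_importer_cache']
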
1205 def rlcomplete(self, text, state):
1206 def rlcomplete(self, text, state):
1206 """Return the state-th possible completion for 'text'.
1207 """Return the state-th possible completion for 'text'.
1207
1208
1208 This is called successively with state == 0, 1, 2, ... until it
1209 This is called successively with state == 0, 1, 2, ... until it
1209 returns None. The completion should begin with 'text'.
1210 returns None. The completion should begin with 'text'.
1210
1211
1211 Parameters
1212 Parameters
1212 ----------
1213 ----------
1213 text : string
1214 text : string
1214 Text to perform the completion on.
1215 Text to perform the completion on.
1215
1216
1216 state : int
1217 state : int
1217 Counter used by readline.
1218 Counter used by readline.
1218 """
1219 """
1219 if state==0:
1220 if state==0:
1220
1221
1221 self.line_buffer = line_buffer = self.readline.get_line_buffer()
1222 self.line_buffer = line_buffer = self.readline.get_line_buffer()
1222 cursor_pos = self.readline.get_endidx()
1223 cursor_pos = self.readline.get_endidx()
1223
1224
1224 #io.rprint("\nRLCOMPLETE: %r %r %r" %
1225 #io.rprint("\nRLCOMPLETE: %r %r %r" %
1225 # (text, line_buffer, cursor_pos) ) # dbg
1226 # (text, line_buffer, cursor_pos) ) # dbg
1226
1227
1227 # if there is only a tab on a line with only whitespace, instead of
1228 # if there is only a tab on a line with only whitespace, instead of
1228 # the mostly useless 'do you want to see all million completions'
1229 # the mostly useless 'do you want to see all million completions'
1229 # message, just do the right thing and give the user his tab!
1230 # message, just do the right thing and give the user his tab!
1230 # Incidentally, this enables pasting of tabbed text from an editor
1231 # Incidentally, this enables pasting of tabbed text from an editor
1231 # (as long as autoindent is off).
1232 # (as long as autoindent is off).
1232
1233
1233 # It should be noted that at least pyreadline still shows file
1234 # It should be noted that at least pyreadline still shows file
1234 # completions - is there a way around it?
1235 # completions - is there a way around it?
1235
1236
1236 # don't apply this on 'dumb' terminals, such as emacs buffers, so
1237 # don't apply this on 'dumb' terminals, such as emacs buffers, so
1237 # we don't interfere with their own tab-completion mechanism.
1238 # we don't interfere with their own tab-completion mechanism.
1238 if not (self.dumb_terminal or line_buffer.strip()):
1239 if not (self.dumb_terminal or line_buffer.strip()):
1239 self.readline.insert_text('\t')
1240 self.readline.insert_text('\t')
1240 sys.stdout.flush()
1241 sys.stdout.flush()
1241 return None
1242 return None
1242
1243
1243 # Note: debugging exceptions that may occur in completion is very
1244 # Note: debugging exceptions that may occur in completion is very
1244 # tricky, because readline unconditionally silences them. So if
1245 # tricky, because readline unconditionally silences them. So if
1245 # during development you suspect a bug in the completion code, turn
1246 # during development you suspect a bug in the completion code, turn
1246 # this flag on temporarily by uncommenting the second form (don't
1247 # this flag on temporarily by uncommenting the second form (don't
1247 # flip the value in the first line, as the '# dbg' marker can be
1248 # flip the value in the first line, as the '# dbg' marker can be
1248 # automatically detected and is used elsewhere).
1249 # automatically detected and is used elsewhere).
1249 DEBUG = False
1250 DEBUG = False
1250 #DEBUG = True # dbg
1251 #DEBUG = True # dbg
1251 if DEBUG:
1252 if DEBUG:
1252 try:
1253 try:
1253 self.complete(text, line_buffer, cursor_pos)
1254 self.complete(text, line_buffer, cursor_pos)
1254 except:
1255 except:
1255 import traceback; traceback.print_exc()
1256 import traceback; traceback.print_exc()
1256 else:
1257 else:
1257 # The normal production version is here
1258 # The normal production version is here
1258
1259
1259 # This method computes the self.matches array
1260 # This method computes the self.matches array
1260 self.complete(text, line_buffer, cursor_pos)
1261 self.complete(text, line_buffer, cursor_pos)
1261
1262
1262 try:
1263 try:
1263 return self.matches[state]
1264 return self.matches[state]
1264 except IndexError:
1265 except IndexError:
1265 return None
1266 return None
1266
1267
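
rlcomplete is the piece handed to GNU readline; wiring it up manually looks roughly like this (IPython performs the equivalent itself when readline support is active):

import readline
from IPython import get_ipython

ip = get_ipython()
# readline calls rlcomplete(text, 0), rlcomplete(text, 1), ... until it gets None
readline.set_completer(ip.Completer.rlcomplete)
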
@@ -1,755 +1,760 b''
1 # encoding: utf-8
1 # encoding: utf-8
2 """Tests for the IPython tab-completion machinery."""
2 """Tests for the IPython tab-completion machinery."""
3
3
4 # Copyright (c) IPython Development Team.
4 # Copyright (c) IPython Development Team.
5 # Distributed under the terms of the Modified BSD License.
5 # Distributed under the terms of the Modified BSD License.
6
6
7 import os
7 import os
8 import sys
8 import sys
9 import unittest
9 import unittest
10
10
11 from contextlib import contextmanager
11 from contextlib import contextmanager
12
12
13 import nose.tools as nt
13 import nose.tools as nt
14
14
15 from traitlets.config.loader import Config
15 from traitlets.config.loader import Config
16 from IPython.core import completer
16 from IPython.core import completer
17 from IPython.external.decorators import knownfailureif
17 from IPython.external.decorators import knownfailureif
18 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
18 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
19 from IPython.utils.generics import complete_object
19 from IPython.utils.generics import complete_object
20 from IPython.utils import py3compat
20 from IPython.utils import py3compat
21 from IPython.utils.py3compat import string_types, unicode_type
21 from IPython.utils.py3compat import string_types, unicode_type
22 from IPython.testing import decorators as dec
22 from IPython.testing import decorators as dec
23
23
24 #-----------------------------------------------------------------------------
24 #-----------------------------------------------------------------------------
25 # Test functions
25 # Test functions
26 #-----------------------------------------------------------------------------
26 #-----------------------------------------------------------------------------
27
27
28 @contextmanager
28 @contextmanager
29 def greedy_completion():
29 def greedy_completion():
30 ip = get_ipython()
30 ip = get_ipython()
31 greedy_original = ip.Completer.greedy
31 greedy_original = ip.Completer.greedy
32 try:
32 try:
33 ip.Completer.greedy = True
33 ip.Completer.greedy = True
34 yield
34 yield
35 finally:
35 finally:
36 ip.Completer.greedy = greedy_original
36 ip.Completer.greedy = greedy_original
37
37
38 def test_protect_filename():
38 def test_protect_filename():
39 pairs = [ ('abc','abc'),
39 pairs = [ ('abc','abc'),
40 (' abc',r'\ abc'),
40 (' abc',r'\ abc'),
41 ('a bc',r'a\ bc'),
41 ('a bc',r'a\ bc'),
42 ('a bc',r'a\ \ bc'),
42 ('a bc',r'a\ \ bc'),
43 (' bc',r'\ \ bc'),
43 (' bc',r'\ \ bc'),
44 ]
44 ]
45 # On posix, we also protect parens and other special characters
45 # On posix, we also protect parens and other special characters
46 if sys.platform != 'win32':
46 if sys.platform != 'win32':
47 pairs.extend( [('a(bc',r'a\(bc'),
47 pairs.extend( [('a(bc',r'a\(bc'),
48 ('a)bc',r'a\)bc'),
48 ('a)bc',r'a\)bc'),
49 ('a( )bc',r'a\(\ \)bc'),
49 ('a( )bc',r'a\(\ \)bc'),
50 ('a[1]bc', r'a\[1\]bc'),
50 ('a[1]bc', r'a\[1\]bc'),
51 ('a{1}bc', r'a\{1\}bc'),
51 ('a{1}bc', r'a\{1\}bc'),
52 ('a#bc', r'a\#bc'),
52 ('a#bc', r'a\#bc'),
53 ('a?bc', r'a\?bc'),
53 ('a?bc', r'a\?bc'),
54 ('a=bc', r'a\=bc'),
54 ('a=bc', r'a\=bc'),
55 ('a\\bc', r'a\\bc'),
55 ('a\\bc', r'a\\bc'),
56 ('a|bc', r'a\|bc'),
56 ('a|bc', r'a\|bc'),
57 ('a;bc', r'a\;bc'),
57 ('a;bc', r'a\;bc'),
58 ('a:bc', r'a\:bc'),
58 ('a:bc', r'a\:bc'),
59 ("a'bc", r"a\'bc"),
59 ("a'bc", r"a\'bc"),
60 ('a*bc', r'a\*bc'),
60 ('a*bc', r'a\*bc'),
61 ('a"bc', r'a\"bc'),
61 ('a"bc', r'a\"bc'),
62 ('a^bc', r'a\^bc'),
62 ('a^bc', r'a\^bc'),
63 ('a&bc', r'a\&bc'),
63 ('a&bc', r'a\&bc'),
64 ] )
64 ] )
65 # run the actual tests
65 # run the actual tests
66 for s1, s2 in pairs:
66 for s1, s2 in pairs:
67 s1p = completer.protect_filename(s1)
67 s1p = completer.protect_filename(s1)
68 nt.assert_equal(s1p, s2)
68 nt.assert_equal(s1p, s2)
69
69
70
70
71 def check_line_split(splitter, test_specs):
71 def check_line_split(splitter, test_specs):
72 for part1, part2, split in test_specs:
72 for part1, part2, split in test_specs:
73 cursor_pos = len(part1)
73 cursor_pos = len(part1)
74 line = part1+part2
74 line = part1+part2
75 out = splitter.split_line(line, cursor_pos)
75 out = splitter.split_line(line, cursor_pos)
76 nt.assert_equal(out, split)
76 nt.assert_equal(out, split)
77
77
78
78
79 def test_line_split():
79 def test_line_split():
80 """Basic line splitter test with default specs."""
80 """Basic line splitter test with default specs."""
81 sp = completer.CompletionSplitter()
81 sp = completer.CompletionSplitter()
82 # The format of the test specs is: part1, part2, expected answer. Parts 1
82 # The format of the test specs is: part1, part2, expected answer. Parts 1
83 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
83 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
84 # was at the end of part1. So an empty part2 represents someone hitting
84 # was at the end of part1. So an empty part2 represents someone hitting
85 # tab at the end of the line, the most common case.
85 # tab at the end of the line, the most common case.
86 t = [('run some/scrip', '', 'some/scrip'),
86 t = [('run some/scrip', '', 'some/scrip'),
87 ('run scripts/er', 'ror.py foo', 'scripts/er'),
87 ('run scripts/er', 'ror.py foo', 'scripts/er'),
88 ('echo $HOM', '', 'HOM'),
88 ('echo $HOM', '', 'HOM'),
89 ('print sys.pa', '', 'sys.pa'),
89 ('print sys.pa', '', 'sys.pa'),
90 ('print(sys.pa', '', 'sys.pa'),
90 ('print(sys.pa', '', 'sys.pa'),
91 ("execfile('scripts/er", '', 'scripts/er'),
91 ("execfile('scripts/er", '', 'scripts/er'),
92 ('a[x.', '', 'x.'),
92 ('a[x.', '', 'x.'),
93 ('a[x.', 'y', 'x.'),
93 ('a[x.', 'y', 'x.'),
94 ('cd "some_file/', '', 'some_file/'),
94 ('cd "some_file/', '', 'some_file/'),
95 ]
95 ]
96 check_line_split(sp, t)
96 check_line_split(sp, t)
97 # Ensure splitting works OK with unicode by re-running the tests with
97 # Ensure splitting works OK with unicode by re-running the tests with
98 # all inputs turned into unicode
98 # all inputs turned into unicode
99 check_line_split(sp, [ map(unicode_type, p) for p in t] )
99 check_line_split(sp, [ map(unicode_type, p) for p in t] )
100
100
101
101
102 def test_custom_completion_error():
102 def test_custom_completion_error():
103 """Test that errors from custom attribute completers are silenced."""
103 """Test that errors from custom attribute completers are silenced."""
104 ip = get_ipython()
104 ip = get_ipython()
105 class A(object): pass
105 class A(object): pass
106 ip.user_ns['a'] = A()
106 ip.user_ns['a'] = A()
107
107
108 @complete_object.when_type(A)
108 @complete_object.when_type(A)
109 def complete_A(a, existing_completions):
109 def complete_A(a, existing_completions):
110 raise TypeError("this should be silenced")
110 raise TypeError("this should be silenced")
111
111
112 ip.complete("a.")
112 ip.complete("a.")
113
113
114
114
115 def test_unicode_completions():
115 def test_unicode_completions():
116 ip = get_ipython()
116 ip = get_ipython()
117 # Some strings that trigger different types of completion. Check them both
117 # Some strings that trigger different types of completion. Check them both
118 # in str and unicode forms
118 # in str and unicode forms
119 s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
119 s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
120 for t in s + list(map(unicode_type, s)):
120 for t in s + list(map(unicode_type, s)):
121 # We don't need to check exact completion values (they may change
121 # We don't need to check exact completion values (they may change
122 # depending on the state of the namespace, but at least no exceptions
122 # depending on the state of the namespace, but at least no exceptions
123 # should be thrown and the return value should be a pair of text, list
123 # should be thrown and the return value should be a pair of text, list
124 # values.
124 # values.
125 text, matches = ip.complete(t)
125 text, matches = ip.complete(t)
126 nt.assert_true(isinstance(text, string_types))
126 nt.assert_true(isinstance(text, string_types))
127 nt.assert_true(isinstance(matches, list))
127 nt.assert_true(isinstance(matches, list))
128
128
129 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
129 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
130 def test_latex_completions():
130 def test_latex_completions():
131 from IPython.core.latex_symbols import latex_symbols
131 from IPython.core.latex_symbols import latex_symbols
132 import random
132 import random
133 ip = get_ipython()
133 ip = get_ipython()
134 # Test some random unicode symbols
134 # Test some random unicode symbols
135 keys = random.sample(latex_symbols.keys(), 10)
135 keys = random.sample(latex_symbols.keys(), 10)
136 for k in keys:
136 for k in keys:
137 text, matches = ip.complete(k)
137 text, matches = ip.complete(k)
138 nt.assert_equal(len(matches),1)
138 nt.assert_equal(len(matches),1)
139 nt.assert_equal(text, k)
139 nt.assert_equal(text, k)
140 nt.assert_equal(matches[0], latex_symbols[k])
140 nt.assert_equal(matches[0], latex_symbols[k])
141 # Test a more complex line
141 # Test a more complex line
142 text, matches = ip.complete(u'print(\\alpha')
142 text, matches = ip.complete(u'print(\\alpha')
143 nt.assert_equals(text, u'\\alpha')
143 nt.assert_equals(text, u'\\alpha')
144 nt.assert_equals(matches[0], latex_symbols['\\alpha'])
144 nt.assert_equals(matches[0], latex_symbols['\\alpha'])
145 # Test multiple matching latex symbols
145 # Test multiple matching latex symbols
146 text, matches = ip.complete(u'\\al')
146 text, matches = ip.complete(u'\\al')
147 nt.assert_in('\\alpha', matches)
147 nt.assert_in('\\alpha', matches)
148 nt.assert_in('\\aleph', matches)
148 nt.assert_in('\\aleph', matches)
149
149
150
150
151
151
152
152
153 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
153 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
154 def test_back_latex_completion():
154 def test_back_latex_completion():
155 ip = get_ipython()
155 ip = get_ipython()
156
156
157 # do not return more than 1 match for \beta, only the latex one.
157 # do not return more than 1 match for \beta, only the latex one.
158 name, matches = ip.complete('\\β')
158 name, matches = ip.complete('\\β')
159 nt.assert_equal(len(matches), 1)
159 nt.assert_equal(len(matches), 1)
160 nt.assert_equal(matches[0], '\\beta')
160 nt.assert_equal(matches[0], '\\beta')
161
161
162 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
162 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
163 def test_back_unicode_completion():
163 def test_back_unicode_completion():
164 ip = get_ipython()
164 ip = get_ipython()
165
165
166 name, matches = ip.complete('\\Ⅴ')
166 name, matches = ip.complete('\\Ⅴ')
167 nt.assert_equal(len(matches), 1)
167 nt.assert_equal(len(matches), 1)
168 nt.assert_equal(matches[0], '\\ROMAN NUMERAL FIVE')
168 nt.assert_equal(matches[0], '\\ROMAN NUMERAL FIVE')
169
169
170
170
171 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
171 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
172 def test_forward_unicode_completion():
172 def test_forward_unicode_completion():
173 ip = get_ipython()
173 ip = get_ipython()
174
174
175 name, matches = ip.complete('\\ROMAN NUMERAL FIVE')
175 name, matches = ip.complete('\\ROMAN NUMERAL FIVE')
176 nt.assert_equal(len(matches), 1)
176 nt.assert_equal(len(matches), 1)
177 nt.assert_equal(matches[0], 'Ⅴ')
177 nt.assert_equal(matches[0], 'Ⅴ')
178
178
179 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
179 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
180 def test_no_ascii_back_completion():
180 def test_no_ascii_back_completion():
181 ip = get_ipython()
181 ip = get_ipython()
182 with TemporaryWorkingDirectory(): # Avoid any filename completions
182 with TemporaryWorkingDirectory(): # Avoid any filename completions
183 # single ascii letters that don't yet have completions
183 # single ascii letters that don't yet have completions
184 for letter in 'fjqyJMQVWY' :
184 for letter in 'fjqyJMQVWY' :
185 name, matches = ip.complete('\\'+letter)
185 name, matches = ip.complete('\\'+letter)
186 nt.assert_equal(matches, [])
186 nt.assert_equal(matches, [])
187
187
188
188
189
189
190
190
191 class CompletionSplitterTestCase(unittest.TestCase):
191 class CompletionSplitterTestCase(unittest.TestCase):
192 def setUp(self):
192 def setUp(self):
193 self.sp = completer.CompletionSplitter()
193 self.sp = completer.CompletionSplitter()
194
194
195 def test_delim_setting(self):
195 def test_delim_setting(self):
196 self.sp.delims = ' '
196 self.sp.delims = ' '
197 nt.assert_equal(self.sp.delims, ' ')
197 nt.assert_equal(self.sp.delims, ' ')
198 nt.assert_equal(self.sp._delim_expr, '[\ ]')
198 nt.assert_equal(self.sp._delim_expr, '[\ ]')
199
199
200 def test_spaces(self):
200 def test_spaces(self):
201 """Test with only spaces as split chars."""
201 """Test with only spaces as split chars."""
202 self.sp.delims = ' '
202 self.sp.delims = ' '
203 t = [('foo', '', 'foo'),
203 t = [('foo', '', 'foo'),
204 ('run foo', '', 'foo'),
204 ('run foo', '', 'foo'),
205 ('run foo', 'bar', 'foo'),
205 ('run foo', 'bar', 'foo'),
206 ]
206 ]
207 check_line_split(self.sp, t)
207 check_line_split(self.sp, t)
208
208
209
209
210 def test_has_open_quotes1():
210 def test_has_open_quotes1():
211 for s in ["'", "'''", "'hi' '"]:
211 for s in ["'", "'''", "'hi' '"]:
212 nt.assert_equal(completer.has_open_quotes(s), "'")
212 nt.assert_equal(completer.has_open_quotes(s), "'")
213
213
214
214
215 def test_has_open_quotes2():
215 def test_has_open_quotes2():
216 for s in ['"', '"""', '"hi" "']:
216 for s in ['"', '"""', '"hi" "']:
217 nt.assert_equal(completer.has_open_quotes(s), '"')
217 nt.assert_equal(completer.has_open_quotes(s), '"')
218
218
219
219
220 def test_has_open_quotes3():
220 def test_has_open_quotes3():
221 for s in ["''", "''' '''", "'hi' 'ipython'"]:
221 for s in ["''", "''' '''", "'hi' 'ipython'"]:
222 nt.assert_false(completer.has_open_quotes(s))
222 nt.assert_false(completer.has_open_quotes(s))
223
223
224
224
225 def test_has_open_quotes4():
225 def test_has_open_quotes4():
226 for s in ['""', '""" """', '"hi" "ipython"']:
226 for s in ['""', '""" """', '"hi" "ipython"']:
227 nt.assert_false(completer.has_open_quotes(s))
227 nt.assert_false(completer.has_open_quotes(s))
228
228
229
229
230 @knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
230 @knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
231 def test_abspath_file_completions():
231 def test_abspath_file_completions():
232 ip = get_ipython()
232 ip = get_ipython()
233 with TemporaryDirectory() as tmpdir:
233 with TemporaryDirectory() as tmpdir:
234 prefix = os.path.join(tmpdir, 'foo')
234 prefix = os.path.join(tmpdir, 'foo')
235 suffixes = ['1', '2']
235 suffixes = ['1', '2']
236 names = [prefix+s for s in suffixes]
236 names = [prefix+s for s in suffixes]
237 for n in names:
237 for n in names:
238 open(n, 'w').close()
238 open(n, 'w').close()
239
239
240 # Check simple completion
240 # Check simple completion
241 c = ip.complete(prefix)[1]
241 c = ip.complete(prefix)[1]
242 nt.assert_equal(c, names)
242 nt.assert_equal(c, names)
243
243
244 # Now check with a function call
244 # Now check with a function call
245 cmd = 'a = f("%s' % prefix
245 cmd = 'a = f("%s' % prefix
246 c = ip.complete(prefix, cmd)[1]
246 c = ip.complete(prefix, cmd)[1]
247 comp = [prefix+s for s in suffixes]
247 comp = [prefix+s for s in suffixes]
248 nt.assert_equal(c, comp)
248 nt.assert_equal(c, comp)
249
249
250
250
251 def test_local_file_completions():
251 def test_local_file_completions():
252 ip = get_ipython()
252 ip = get_ipython()
253 with TemporaryWorkingDirectory():
253 with TemporaryWorkingDirectory():
254 prefix = './foo'
254 prefix = './foo'
255 suffixes = ['1', '2']
255 suffixes = ['1', '2']
256 names = [prefix+s for s in suffixes]
256 names = [prefix+s for s in suffixes]
257 for n in names:
257 for n in names:
258 open(n, 'w').close()
258 open(n, 'w').close()
259
259
260 # Check simple completion
260 # Check simple completion
261 c = ip.complete(prefix)[1]
261 c = ip.complete(prefix)[1]
262 nt.assert_equal(c, names)
262 nt.assert_equal(c, names)
263
263
264 # Now check with a function call
264 # Now check with a function call
265 cmd = 'a = f("%s' % prefix
265 cmd = 'a = f("%s' % prefix
266 c = ip.complete(prefix, cmd)[1]
266 c = ip.complete(prefix, cmd)[1]
267 comp = [prefix+s for s in suffixes]
267 comp = [prefix+s for s in suffixes]
268 nt.assert_equal(c, comp)
268 nt.assert_equal(c, comp)
269
269
270
270
271 def test_greedy_completions():
271 def test_greedy_completions():
272 ip = get_ipython()
272 ip = get_ipython()
273 ip.ex('a=list(range(5))')
273 ip.ex('a=list(range(5))')
274 _,c = ip.complete('.',line='a[0].')
274 _,c = ip.complete('.',line='a[0].')
275 nt.assert_false('a[0].real' in c,
275 nt.assert_false('a[0].real' in c,
276 "Shouldn't have completed on a[0]: %s"%c)
276 "Shouldn't have completed on a[0]: %s"%c)
277 with greedy_completion():
277 with greedy_completion():
278 _,c = ip.complete('.',line='a[0].')
278 _,c = ip.complete('.',line='a[0].')
279 nt.assert_true('a[0].real' in c, "Should have completed on a[0]: %s"%c)
279 nt.assert_true('a[0].real' in c, "Should have completed on a[0]: %s"%c)
280
280
281
281
282 def test_omit__names():
282 def test_omit__names():
283 # also happens to test IPCompleter as a configurable
283 # also happens to test IPCompleter as a configurable
284 ip = get_ipython()
284 ip = get_ipython()
285 ip._hidden_attr = 1
285 ip._hidden_attr = 1
286 ip._x = {}
286 ip._x = {}
287 c = ip.Completer
287 c = ip.Completer
288 ip.ex('ip=get_ipython()')
288 ip.ex('ip=get_ipython()')
289 cfg = Config()
289 cfg = Config()
290 cfg.IPCompleter.omit__names = 0
290 cfg.IPCompleter.omit__names = 0
291 c.update_config(cfg)
291 c.update_config(cfg)
292 s,matches = c.complete('ip.')
292 s,matches = c.complete('ip.')
293 nt.assert_in('ip.__str__', matches)
293 nt.assert_in('ip.__str__', matches)
294 nt.assert_in('ip._hidden_attr', matches)
294 nt.assert_in('ip._hidden_attr', matches)
295 cfg.IPCompleter.omit__names = 1
295 cfg.IPCompleter.omit__names = 1
296 c.update_config(cfg)
296 c.update_config(cfg)
297 s,matches = c.complete('ip.')
297 s,matches = c.complete('ip.')
298 nt.assert_not_in('ip.__str__', matches)
298 nt.assert_not_in('ip.__str__', matches)
299 nt.assert_in('ip._hidden_attr', matches)
299 nt.assert_in('ip._hidden_attr', matches)
300 cfg.IPCompleter.omit__names = 2
300 cfg.IPCompleter.omit__names = 2
301 c.update_config(cfg)
301 c.update_config(cfg)
302 s,matches = c.complete('ip.')
302 s,matches = c.complete('ip.')
303 nt.assert_not_in('ip.__str__', matches)
303 nt.assert_not_in('ip.__str__', matches)
304 nt.assert_not_in('ip._hidden_attr', matches)
304 nt.assert_not_in('ip._hidden_attr', matches)
305 s,matches = c.complete('ip._x.')
305 s,matches = c.complete('ip._x.')
306 nt.assert_in('ip._x.keys', matches)
306 nt.assert_in('ip._x.keys', matches)
307 del ip._hidden_attr
307 del ip._hidden_attr
308
308
309
309
310 def test_limit_to__all__False_ok():
310 def test_limit_to__all__False_ok():
311 ip = get_ipython()
311 ip = get_ipython()
312 c = ip.Completer
312 c = ip.Completer
313 ip.ex('class D: x=24')
313 ip.ex('class D: x=24')
314 ip.ex('d=D()')
314 ip.ex('d=D()')
315 cfg = Config()
315 cfg = Config()
316 cfg.IPCompleter.limit_to__all__ = False
316 cfg.IPCompleter.limit_to__all__ = False
317 c.update_config(cfg)
317 c.update_config(cfg)
318 s, matches = c.complete('d.')
318 s, matches = c.complete('d.')
319 nt.assert_in('d.x', matches)
319 nt.assert_in('d.x', matches)
320
320
321
321
322 def test_limit_to__all__True_ok():
322 def test_limit_to__all__True_ok():
323 ip = get_ipython()
323 ip = get_ipython()
324 c = ip.Completer
324 c = ip.Completer
325 ip.ex('class D: x=24')
325 ip.ex('class D: x=24')
326 ip.ex('d=D()')
326 ip.ex('d=D()')
327 ip.ex("d.__all__=['z']")
327 ip.ex("d.__all__=['z']")
328 cfg = Config()
328 cfg = Config()
329 cfg.IPCompleter.limit_to__all__ = True
329 cfg.IPCompleter.limit_to__all__ = True
330 c.update_config(cfg)
330 c.update_config(cfg)
331 s, matches = c.complete('d.')
331 s, matches = c.complete('d.')
332 nt.assert_in('d.z', matches)
332 nt.assert_in('d.z', matches)
333 nt.assert_not_in('d.x', matches)
333 nt.assert_not_in('d.x', matches)
334
334
335
335
336 def test_get__all__entries_ok():
336 def test_get__all__entries_ok():
337 class A(object):
337 class A(object):
338 __all__ = ['x', 1]
338 __all__ = ['x', 1]
339 words = completer.get__all__entries(A())
339 words = completer.get__all__entries(A())
340 nt.assert_equal(words, ['x'])
340 nt.assert_equal(words, ['x'])
341
341
342
342
343 def test_get__all__entries_no__all__ok():
343 def test_get__all__entries_no__all__ok():
344 class A(object):
344 class A(object):
345 pass
345 pass
346 words = completer.get__all__entries(A())
346 words = completer.get__all__entries(A())
347 nt.assert_equal(words, [])
347 nt.assert_equal(words, [])
348
348
349
349
350 def test_func_kw_completions():
350 def test_func_kw_completions():
351 ip = get_ipython()
351 ip = get_ipython()
352 c = ip.Completer
352 c = ip.Completer
353 ip.ex('def myfunc(a=1,b=2): return a+b')
353 ip.ex('def myfunc(a=1,b=2): return a+b')
354 s, matches = c.complete(None, 'myfunc(1,b')
354 s, matches = c.complete(None, 'myfunc(1,b')
355 nt.assert_in('b=', matches)
355 nt.assert_in('b=', matches)
356 # Simulate completing with cursor right after b (pos==10):
356 # Simulate completing with cursor right after b (pos==10):
357 s, matches = c.complete(None, 'myfunc(1,b)', 10)
357 s, matches = c.complete(None, 'myfunc(1,b)', 10)
358 nt.assert_in('b=', matches)
358 nt.assert_in('b=', matches)
359 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
359 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
360 nt.assert_in('b=', matches)
360 nt.assert_in('b=', matches)
361 #builtin function
361 #builtin function
362 s, matches = c.complete(None, 'min(k, k')
362 s, matches = c.complete(None, 'min(k, k')
363 nt.assert_in('key=', matches)
363 nt.assert_in('key=', matches)
364
364
365
365
366 def test_default_arguments_from_docstring():
366 def test_default_arguments_from_docstring():
367 doc = min.__doc__
367 doc = min.__doc__
368 ip = get_ipython()
368 ip = get_ipython()
369 c = ip.Completer
369 c = ip.Completer
370 kwd = c._default_arguments_from_docstring(
370 kwd = c._default_arguments_from_docstring(
371 'min(iterable[, key=func]) -> value')
371 'min(iterable[, key=func]) -> value')
372 nt.assert_equal(kwd, ['key'])
372 nt.assert_equal(kwd, ['key'])
373 #with cython type etc
373 #with cython type etc
374 kwd = c._default_arguments_from_docstring(
374 kwd = c._default_arguments_from_docstring(
375 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
375 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
376 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
376 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
377 #white spaces
377 #white spaces
378 kwd = c._default_arguments_from_docstring(
378 kwd = c._default_arguments_from_docstring(
379 '\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
379 '\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
380 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
380 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
381
381
382 def test_line_magics():
382 def test_line_magics():
383 ip = get_ipython()
383 ip = get_ipython()
384 c = ip.Completer
384 c = ip.Completer
385 s, matches = c.complete(None, 'lsmag')
385 s, matches = c.complete(None, 'lsmag')
386 nt.assert_in('%lsmagic', matches)
386 nt.assert_in('%lsmagic', matches)
387 s, matches = c.complete(None, '%lsmag')
387 s, matches = c.complete(None, '%lsmag')
388 nt.assert_in('%lsmagic', matches)
388 nt.assert_in('%lsmagic', matches)
389
389
390
390
391 def test_cell_magics():
391 def test_cell_magics():
392 from IPython.core.magic import register_cell_magic
392 from IPython.core.magic import register_cell_magic
393
393
394 @register_cell_magic
394 @register_cell_magic
395 def _foo_cellm(line, cell):
395 def _foo_cellm(line, cell):
396 pass
396 pass
397
397
398 ip = get_ipython()
398 ip = get_ipython()
399 c = ip.Completer
399 c = ip.Completer
400
400
401 s, matches = c.complete(None, '_foo_ce')
401 s, matches = c.complete(None, '_foo_ce')
402 nt.assert_in('%%_foo_cellm', matches)
402 nt.assert_in('%%_foo_cellm', matches)
403 s, matches = c.complete(None, '%%_foo_ce')
403 s, matches = c.complete(None, '%%_foo_ce')
404 nt.assert_in('%%_foo_cellm', matches)
404 nt.assert_in('%%_foo_cellm', matches)
405
405
406
406
407 def test_line_cell_magics():
407 def test_line_cell_magics():
408 from IPython.core.magic import register_line_cell_magic
408 from IPython.core.magic import register_line_cell_magic
409
409
410 @register_line_cell_magic
410 @register_line_cell_magic
411 def _bar_cellm(line, cell):
411 def _bar_cellm(line, cell):
412 pass
412 pass
413
413
414 ip = get_ipython()
414 ip = get_ipython()
415 c = ip.Completer
415 c = ip.Completer
416
416
417 # The policy here is trickier, see comments in completion code. The
417 # The policy here is trickier, see comments in completion code. The
418 # returned values depend on whether the user passes %% or not explicitly,
418 # returned values depend on whether the user passes %% or not explicitly,
419 # and this will show a difference if the same name is both a line and cell
419 # and this will show a difference if the same name is both a line and cell
420 # magic.
420 # magic.
421 s, matches = c.complete(None, '_bar_ce')
421 s, matches = c.complete(None, '_bar_ce')
422 nt.assert_in('%_bar_cellm', matches)
422 nt.assert_in('%_bar_cellm', matches)
423 nt.assert_in('%%_bar_cellm', matches)
423 nt.assert_in('%%_bar_cellm', matches)
424 s, matches = c.complete(None, '%_bar_ce')
424 s, matches = c.complete(None, '%_bar_ce')
425 nt.assert_in('%_bar_cellm', matches)
425 nt.assert_in('%_bar_cellm', matches)
426 nt.assert_in('%%_bar_cellm', matches)
426 nt.assert_in('%%_bar_cellm', matches)
427 s, matches = c.complete(None, '%%_bar_ce')
427 s, matches = c.complete(None, '%%_bar_ce')
428 nt.assert_not_in('%_bar_cellm', matches)
428 nt.assert_not_in('%_bar_cellm', matches)
429 nt.assert_in('%%_bar_cellm', matches)
429 nt.assert_in('%%_bar_cellm', matches)
430
430
431
431
432 def test_magic_completion_order():
432 def test_magic_completion_order():
433
433
434 ip = get_ipython()
434 ip = get_ipython()
435 c = ip.Completer
435 c = ip.Completer
436
436
437 # Test ordering of magics and non-magics with the same name
437 # Test ordering of magics and non-magics with the same name
438 # We want the non-magic first
438 # We want the non-magic first
439
439
440 # Before importing matplotlib, there should only be one option:
440 # Before importing matplotlib, there should only be one option:
441
441
442 text, matches = c.complete('mat')
442 text, matches = c.complete('mat')
443 nt.assert_equal(matches, ["%matplotlib"])
443 nt.assert_equal(matches, ["%matplotlib"])
444
444
445
445
446 ip.run_cell("matplotlib = 1") # introduce name into namespace
446 ip.run_cell("matplotlib = 1") # introduce name into namespace
447
447
448 # After the import, there should be two options, ordered like this:
448 # After the import, there should be two options, ordered like this:
449 text, matches = c.complete('mat')
449 text, matches = c.complete('mat')
450 nt.assert_equal(matches, ["matplotlib", "%matplotlib"])
450 nt.assert_equal(matches, ["matplotlib", "%matplotlib"])
451
451
452
452
453 ip.run_cell("timeit = 1") # define a user variable called 'timeit'
453 ip.run_cell("timeit = 1") # define a user variable called 'timeit'
454
454
455 # Order of user variable and line and cell magics with same name:
455 # Order of user variable and line and cell magics with same name:
456 text, matches = c.complete('timeit')
456 text, matches = c.complete('timeit')
457 nt.assert_equal(matches, ["timeit", "%timeit","%%timeit"])
457 nt.assert_equal(matches, ["timeit", "%timeit","%%timeit"])
458
458
459
459
460 def test_dict_key_completion_string():
460 def test_dict_key_completion_string():
461 """Test dictionary key completion for string keys"""
461 """Test dictionary key completion for string keys"""
462 ip = get_ipython()
462 ip = get_ipython()
463 complete = ip.Completer.complete
463 complete = ip.Completer.complete
464
464
465 ip.user_ns['d'] = {'abc': None}
465 ip.user_ns['d'] = {'abc': None}
466
466
467 # check completion at different stages
467 # check completion at different stages
468 _, matches = complete(line_buffer="d[")
468 _, matches = complete(line_buffer="d[")
469 nt.assert_in("'abc'", matches)
469 nt.assert_in("'abc'", matches)
470 nt.assert_not_in("'abc']", matches)
470 nt.assert_not_in("'abc']", matches)
471
471
472 _, matches = complete(line_buffer="d['")
472 _, matches = complete(line_buffer="d['")
473 nt.assert_in("abc", matches)
473 nt.assert_in("abc", matches)
474 nt.assert_not_in("abc']", matches)
474 nt.assert_not_in("abc']", matches)
475
475
476 _, matches = complete(line_buffer="d['a")
476 _, matches = complete(line_buffer="d['a")
477 nt.assert_in("abc", matches)
477 nt.assert_in("abc", matches)
478 nt.assert_not_in("abc']", matches)
478 nt.assert_not_in("abc']", matches)
479
479
480 # check use of different quoting
480 # check use of different quoting
481 _, matches = complete(line_buffer="d[\"")
481 _, matches = complete(line_buffer="d[\"")
482 nt.assert_in("abc", matches)
482 nt.assert_in("abc", matches)
483 nt.assert_not_in('abc\"]', matches)
483 nt.assert_not_in('abc\"]', matches)
484
484
485 _, matches = complete(line_buffer="d[\"a")
485 _, matches = complete(line_buffer="d[\"a")
486 nt.assert_in("abc", matches)
486 nt.assert_in("abc", matches)
487 nt.assert_not_in('abc\"]', matches)
487 nt.assert_not_in('abc\"]', matches)
488
488
489 # check sensitivity to following context
489 # check sensitivity to following context
490 _, matches = complete(line_buffer="d[]", cursor_pos=2)
490 _, matches = complete(line_buffer="d[]", cursor_pos=2)
491 nt.assert_in("'abc'", matches)
491 nt.assert_in("'abc'", matches)
492
492
493 _, matches = complete(line_buffer="d['']", cursor_pos=3)
493 _, matches = complete(line_buffer="d['']", cursor_pos=3)
494 nt.assert_in("abc", matches)
494 nt.assert_in("abc", matches)
495 nt.assert_not_in("abc'", matches)
495 nt.assert_not_in("abc'", matches)
496 nt.assert_not_in("abc']", matches)
496 nt.assert_not_in("abc']", matches)
497
497
498 # check that multiple matching keys are returned and that non-matching noise is not
498 # check that multiple matching keys are returned and that non-matching noise is not
499 ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,
499 ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,
500 5: None}
500 5: None}
501
501
502 _, matches = complete(line_buffer="d['a")
502 _, matches = complete(line_buffer="d['a")
503 nt.assert_in("abc", matches)
503 nt.assert_in("abc", matches)
504 nt.assert_in("abd", matches)
504 nt.assert_in("abd", matches)
505 nt.assert_not_in("bad", matches)
505 nt.assert_not_in("bad", matches)
506 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
506 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
507
507
508 # check escaping and whitespace
508 # check escaping and whitespace
509 ip.user_ns['d'] = {'a\nb': None, 'a\'b': None, 'a"b': None, 'a word': None}
509 ip.user_ns['d'] = {'a\nb': None, 'a\'b': None, 'a"b': None, 'a word': None}
510 _, matches = complete(line_buffer="d['a")
510 _, matches = complete(line_buffer="d['a")
511 nt.assert_in("a\\nb", matches)
511 nt.assert_in("a\\nb", matches)
512 nt.assert_in("a\\'b", matches)
512 nt.assert_in("a\\'b", matches)
513 nt.assert_in("a\"b", matches)
513 nt.assert_in("a\"b", matches)
514 nt.assert_in("a word", matches)
514 nt.assert_in("a word", matches)
515 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
515 assert not any(m.endswith((']', '"', "'")) for m in matches), matches

    # - can complete on non-initial word of the string
    _, matches = complete(line_buffer="d['a w")
    nt.assert_in("word", matches)

    # - understands quote escaping
    _, matches = complete(line_buffer="d['a\\'")
    nt.assert_in("b", matches)

    # - default quoting should work like repr
    _, matches = complete(line_buffer="d[")
    nt.assert_in("\"a'b\"", matches)

    # - when opening quote with ", possible to match with unescaped apostrophe
    _, matches = complete(line_buffer="d[\"a'")
    nt.assert_in("b", matches)

    # need to not split at delims that readline won't split at
    if '-' not in ip.Completer.splitter.delims:
        ip.user_ns['d'] = {'before-after': None}
        _, matches = complete(line_buffer="d['before-af")
        nt.assert_in('before-after', matches)
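
    # Rationale for the guard above: dict key completion now uses the same
    # delimiters as ip.Completer.splitter, and '-' is normally not one of
    # them, so the fragment 'before-af' reaches the key matcher intact and the
    # whole key 'before-after' is offered. If a configuration did list '-' as
    # a delimiter, the word would be split at the dash and this assertion
    # could not hold, hence the check is skipped in that case.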


def test_dict_key_completion_contexts():
    """Test expression contexts in which dict key completion occurs"""
    ip = get_ipython()
    complete = ip.Completer.complete
    d = {'abc': None}
    ip.user_ns['d'] = d

    class C:
        data = d
    ip.user_ns['C'] = C
    ip.user_ns['get'] = lambda: d

    def assert_no_completion(**kwargs):
        _, matches = complete(**kwargs)
        nt.assert_not_in('abc', matches)
        nt.assert_not_in('abc\'', matches)
        nt.assert_not_in('abc\']', matches)
        nt.assert_not_in('\'abc\'', matches)
        nt.assert_not_in('\'abc\']', matches)

    def assert_completion(**kwargs):
        _, matches = complete(**kwargs)
        nt.assert_in("'abc'", matches)
        nt.assert_not_in("'abc']", matches)

    # no completion after string closed, even if reopened
    assert_no_completion(line_buffer="d['a'")
    assert_no_completion(line_buffer="d[\"a\"")
    assert_no_completion(line_buffer="d['a' + ")
    assert_no_completion(line_buffer="d['a' + '")

    # completion in non-trivial expressions
    assert_completion(line_buffer="+ d[")
    assert_completion(line_buffer="(d[")
    assert_completion(line_buffer="C.data[")

    # greedy flag
    def assert_completion(**kwargs):
        _, matches = complete(**kwargs)
        nt.assert_in("get()['abc']", matches)

    assert_no_completion(line_buffer="get()[")
    with greedy_completion():
        assert_completion(line_buffer="get()[")
        assert_completion(line_buffer="get()['")
        assert_completion(line_buffer="get()['a")
        assert_completion(line_buffer="get()['ab")
        assert_completion(line_buffer="get()['abc")
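
    # Without the greedy flag the completer does not evaluate the call
    # expression get(), so no keys are offered for "get()[". The
    # greedy_completion() helper appears to switch the completer into greedy
    # mode temporarily; matches then carry the full replacement text,
    # e.g. "get()['abc']".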


@dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
def test_dict_key_completion_bytes():
    """Test handling of bytes in dict key completion"""
    ip = get_ipython()
    complete = ip.Completer.complete

    ip.user_ns['d'] = {'abc': None, b'abd': None}

    _, matches = complete(line_buffer="d[")
    nt.assert_in("'abc'", matches)
    nt.assert_in("b'abd'", matches)

    if False:  # not currently implemented
        _, matches = complete(line_buffer="d[b")
        nt.assert_in("b'abd'", matches)
        nt.assert_not_in("b'abc'", matches)

        _, matches = complete(line_buffer="d[b'")
        nt.assert_in("abd", matches)
        nt.assert_not_in("abc", matches)

        _, matches = complete(line_buffer="d[B'")
        nt.assert_in("abd", matches)
        nt.assert_not_in("abc", matches)

        _, matches = complete(line_buffer="d['")
        nt.assert_in("abc", matches)
        nt.assert_not_in("abd", matches)


@dec.onlyif(sys.version_info[0] < 3, 'This test only applies in Py<3')
def test_dict_key_completion_unicode_py2():
    """Test handling of unicode in dict key completion"""
    ip = get_ipython()
    complete = ip.Completer.complete

    ip.user_ns['d'] = {u'abc': None,
                       u'a\u05d0b': None}

    _, matches = complete(line_buffer="d[")
    nt.assert_in("u'abc'", matches)
    nt.assert_in("u'a\\u05d0b'", matches)

    _, matches = complete(line_buffer="d['a")
    nt.assert_in("abc", matches)
    nt.assert_not_in("a\\u05d0b", matches)

    _, matches = complete(line_buffer="d[u'a")
    nt.assert_in("abc", matches)
    nt.assert_in("a\\u05d0b", matches)

    _, matches = complete(line_buffer="d[U'a")
    nt.assert_in("abc", matches)
    nt.assert_in("a\\u05d0b", matches)

    # query using escape
    _, matches = complete(line_buffer=u"d[u'a\\u05d0")
    nt.assert_in("u05d0b", matches)  # tokenized after \\

    # query using character
    _, matches = complete(line_buffer=u"d[u'a\u05d0")
    nt.assert_in(u"a\u05d0b", matches)

    with greedy_completion():
        _, matches = complete(line_buffer="d[")
        nt.assert_in("d[u'abc']", matches)
        nt.assert_in("d[u'a\\u05d0b']", matches)

        _, matches = complete(line_buffer="d['a")
        nt.assert_in("d['abc']", matches)
        nt.assert_not_in("d[u'a\\u05d0b']", matches)

        _, matches = complete(line_buffer="d[u'a")
        nt.assert_in("d[u'abc']", matches)
        nt.assert_in("d[u'a\\u05d0b']", matches)

        _, matches = complete(line_buffer="d[U'a")
        nt.assert_in("d[U'abc']", matches)
        nt.assert_in("d[U'a\\u05d0b']", matches)

        # query using escape
        _, matches = complete(line_buffer=u"d[u'a\\u05d0")
        nt.assert_in("d[u'a\\u05d0b']", matches)  # tokenized after \\

        # query using character
        _, matches = complete(line_buffer=u"d[u'a\u05d0")
        nt.assert_in(u"d[u'a\u05d0b']", matches)


@dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
def test_dict_key_completion_unicode_py3():
    """Test handling of unicode in dict key completion"""
    ip = get_ipython()
    complete = ip.Completer.complete

    ip.user_ns['d'] = {u'a\u05d0': None}

    # query using escape
    _, matches = complete(line_buffer="d['a\\u05d0")
    nt.assert_in("u05d0", matches)  # tokenized after \\

    # query using character
    _, matches = complete(line_buffer="d['a\u05d0")
    nt.assert_in(u"a\u05d0", matches)

    with greedy_completion():
        # query using escape
        _, matches = complete(line_buffer="d['a\\u05d0")
        nt.assert_in("d['a\\u05d0']", matches)  # tokenized after \\

        # query using character
        _, matches = complete(line_buffer="d['a\u05d0")
        nt.assert_in(u"d['a\u05d0']", matches)


@dec.skip_without('numpy')
def test_struct_array_key_completion():
    """Test dict key completion applies to numpy struct arrays"""
    import numpy
    ip = get_ipython()
    complete = ip.Completer.complete
    ip.user_ns['d'] = numpy.array([], dtype=[('hello', 'f'), ('world', 'f')])
    _, matches = complete(line_buffer="d['")
    nt.assert_in("hello", matches)
    nt.assert_in("world", matches)
    # complete on the numpy struct itself
    dt = numpy.dtype([('my_head', [('my_dt', '>u4'), ('my_df', '>u4')]),
                      ('my_data', '>f4', 5)])
    x = numpy.zeros(2, dtype=dt)
    ip.user_ns['d'] = x[1]
    _, matches = complete(line_buffer="d['")
    nt.assert_in("my_head", matches)
    nt.assert_in("my_data", matches)
    # complete on a nested level
    with greedy_completion():
        ip.user_ns['d'] = numpy.zeros(2, dtype=dt)
        _, matches = complete(line_buffer="d[1]['my_head']['")
        nt.assert_true(any(["my_dt" in m for m in matches]))
        nt.assert_true(any(["my_df" in m for m in matches]))
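        # Completing the nested field requires greedy mode: d[1]['my_head']
        # has to be evaluated before its keys can be listed, which the
        # completer presumably only does when greedy completion is enabled
        # (mirroring the "get()[" case in test_dict_key_completion_contexts).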


@dec.skip_without('pandas')
def test_dataframe_key_completion():
    """Test dict key completion applies to pandas DataFrames"""
    import pandas
    ip = get_ipython()
    complete = ip.Completer.complete
    ip.user_ns['d'] = pandas.DataFrame({'hello': [1], 'world': [2]})
    _, matches = complete(line_buffer="d['")
    nt.assert_in("hello", matches)
    nt.assert_in("world", matches)


def test_dict_key_completion_invalids():
    """Smoke test cases dict key completion can't handle"""
    ip = get_ipython()
    complete = ip.Completer.complete

    ip.user_ns['no_getitem'] = None
    ip.user_ns['no_keys'] = []
    ip.user_ns['cant_call_keys'] = dict
    ip.user_ns['empty'] = {}
    ip.user_ns['d'] = {'abc': 5}

    _, matches = complete(line_buffer="no_getitem['")
    _, matches = complete(line_buffer="no_keys['")
    _, matches = complete(line_buffer="cant_call_keys['")
    _, matches = complete(line_buffer="empty['")
    _, matches = complete(line_buffer="name_error['")
    _, matches = complete(line_buffer="d['\\")  # incomplete escape
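    # These are smoke tests only: nothing is asserted about `matches`. The
    # point is that complete() must not raise for objects without a usable
    # keys()/__getitem__, for an undefined name, or for a dangling escape.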