Improved Windows path completion....
tmr232 -
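Review note: the two functional changes in this diff make completion Windows-aware. On win32, DELIMS no longer contains '\' and ':', so drive letters and backslash-separated paths survive line splitting, and file_matches no longer strips backslashes from the text before globbing. A minimal standalone sketch of the delimiter effect (illustration only, not part of the patch; the helper name is hypothetical)::

    import re

    OLD_DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'  # previous, platform-independent set
    WIN_DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'      # new win32 set introduced below

    def last_token(line, delims):
        # Same splitting strategy as CompletionSplitter: split on any delimiter
        # and keep the trailing fragment as the word to complete.
        expr = '[' + ''.join('\\' + c for c in delims) + ']'
        return re.split(expr, line)[-1]

    line = r'open("C:\Users\foo\Doc'
    print(last_token(line, OLD_DELIMS))  # 'Doc'              -> path context lost
    print(last_token(line, WIN_DELIMS))  # 'C:\Users\foo\Doc' -> full prefix kept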
@@ -1,1214 +1,1221 @@
1 # encoding: utf-8
1 # encoding: utf-8
2 """Word completion for IPython.
2 """Word completion for IPython.
3
3
4 This module started as fork of the rlcompleter module in the Python standard
4 This module started as fork of the rlcompleter module in the Python standard
5 library. The original enhancements made to rlcompleter have been sent
5 library. The original enhancements made to rlcompleter have been sent
6 upstream and were accepted as of Python 2.3,
6 upstream and were accepted as of Python 2.3,
7
7
8 """
8 """
9
9
10 # Copyright (c) IPython Development Team.
10 # Copyright (c) IPython Development Team.
11 # Distributed under the terms of the Modified BSD License.
11 # Distributed under the terms of the Modified BSD License.
12 #
12 #
13 # Some of this code originated from rlcompleter in the Python standard library
13 # Some of this code originated from rlcompleter in the Python standard library
14 # Copyright (C) 2001 Python Software Foundation, www.python.org
14 # Copyright (C) 2001 Python Software Foundation, www.python.org
15
15
16 from __future__ import print_function
16 from __future__ import print_function
17
17
18 import __main__
18 import __main__
19 import glob
19 import glob
20 import inspect
20 import inspect
21 import itertools
21 import itertools
22 import keyword
22 import keyword
23 import os
23 import os
24 import re
24 import re
25 import sys
25 import sys
26 import unicodedata
26 import unicodedata
27 import string
27 import string
28
28
29 from traitlets.config.configurable import Configurable
29 from traitlets.config.configurable import Configurable
30 from IPython.core.error import TryNext
30 from IPython.core.error import TryNext
31 from IPython.core.inputsplitter import ESC_MAGIC
31 from IPython.core.inputsplitter import ESC_MAGIC
32 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
32 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
33 from IPython.utils import generics
33 from IPython.utils import generics
34 from IPython.utils.decorators import undoc
34 from IPython.utils.decorators import undoc
35 from IPython.utils.dir2 import dir2, get_real_method
35 from IPython.utils.dir2 import dir2, get_real_method
36 from IPython.utils.process import arg_split
36 from IPython.utils.process import arg_split
37 from IPython.utils.py3compat import builtin_mod, string_types, PY3, cast_unicode_py2
37 from IPython.utils.py3compat import builtin_mod, string_types, PY3, cast_unicode_py2
38 from traitlets import Bool, Enum, observe
38 from traitlets import Bool, Enum, observe
39
39
40 from functools import wraps
40 from functools import wraps
41
41
42 #-----------------------------------------------------------------------------
42 #-----------------------------------------------------------------------------
43 # Globals
43 # Globals
44 #-----------------------------------------------------------------------------
44 #-----------------------------------------------------------------------------
45
45
46 # Public API
46 # Public API
47 __all__ = ['Completer','IPCompleter']
47 __all__ = ['Completer','IPCompleter']
48
48
49 if sys.platform == 'win32':
49 if sys.platform == 'win32':
50 PROTECTABLES = ' '
50 PROTECTABLES = ' '
51 else:
51 else:
52 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
52 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
53
53
54
54
55 #-----------------------------------------------------------------------------
55 #-----------------------------------------------------------------------------
56 # Decorators to work around bugs.
56 # Decorators to work around bugs.
57 #-----------------------------------------------------------------------------
57 #-----------------------------------------------------------------------------
58
58
59 def _strip_single_trailing_space(complete):
59 def _strip_single_trailing_space(complete):
60 """
60 """
61 This is a workaround for a weird IPython/Prompt_toolkit behavior
61 This is a workaround for a weird IPython/Prompt_toolkit behavior
62 that can be removed once we rely on a slightly more recent prompt_toolkit
62 that can be removed once we rely on a slightly more recent prompt_toolkit
63 version (likely > 1.0.3). So this can likely be removed in IPython 6.0
63 version (likely > 1.0.3). So this can likely be removed in IPython 6.0
64
64
65 cf https://github.com/ipython/ipython/issues/9658
65 cf https://github.com/ipython/ipython/issues/9658
66 and https://github.com/jonathanslenders/python-prompt-toolkit/pull/328
66 and https://github.com/jonathanslenders/python-prompt-toolkit/pull/328
67
67
68 The bug is due to the fact that in PTK the completer will reinvoke itself
68 The bug is due to the fact that in PTK the completer will reinvoke itself
69 after trying to complete to the longest common prefix of all the
69 after trying to complete to the longest common prefix of all the
70 completions, unless only one completion is available.
70 completions, unless only one completion is available.
71
71
72 This logic is faulty if the completion ends with a space, which can happen in
72 This logic is faulty if the completion ends with a space, which can happen in
73 cases like::
73 cases like::
74
74
75 from foo import im<ta>
75 from foo import im<ta>
76
76
77 whose only matching completion is `import `. Note the trailing space at the
77 whose only matching completion is `import `. Note the trailing space at the
78 end. So leaving a space at the end is a reasonable request, but for now
78 end. So leaving a space at the end is a reasonable request, but for now
79 we'll strip it.
79 we'll strip it.
80 """
80 """
81
81
82 @wraps(complete)
82 @wraps(complete)
83 def comp(*args, **kwargs):
83 def comp(*args, **kwargs):
84 text, matches = complete(*args, **kwargs)
84 text, matches = complete(*args, **kwargs)
85 if len(matches) == 1:
85 if len(matches) == 1:
86 return text, [matches[0].rstrip()]
86 return text, [matches[0].rstrip()]
87 return text, matches
87 return text, matches
88
88
89 return comp
89 return comp
90
90
91
91
92
92
93 #-----------------------------------------------------------------------------
93 #-----------------------------------------------------------------------------
94 # Main functions and classes
94 # Main functions and classes
95 #-----------------------------------------------------------------------------
95 #-----------------------------------------------------------------------------
96
96
97 def has_open_quotes(s):
97 def has_open_quotes(s):
98 """Return whether a string has open quotes.
98 """Return whether a string has open quotes.
99
99
100 This simply checks whether the number of quote characters of either type in
100 This simply checks whether the number of quote characters of either type in
101 the string is odd.
101 the string is odd.
102
102
103 Returns
103 Returns
104 -------
104 -------
105 If there is an open quote, the quote character is returned. Else, return
105 If there is an open quote, the quote character is returned. Else, return
106 False.
106 False.
107 """
107 """
108 # We check " first, then ', so complex cases with nested quotes will get
108 # We check " first, then ', so complex cases with nested quotes will get
109 # the " to take precedence.
109 # the " to take precedence.
110 if s.count('"') % 2:
110 if s.count('"') % 2:
111 return '"'
111 return '"'
112 elif s.count("'") % 2:
112 elif s.count("'") % 2:
113 return "'"
113 return "'"
114 else:
114 else:
115 return False
115 return False
116
116
117
117
118 def protect_filename(s):
118 def protect_filename(s):
119 """Escape a string to protect certain characters."""
119 """Escape a string to protect certain characters."""
120 if set(s) & set(PROTECTABLES):
120 if set(s) & set(PROTECTABLES):
121 if sys.platform == "win32":
121 if sys.platform == "win32":
122 return '"' + s + '"'
122 return '"' + s + '"'
123 else:
123 else:
124 return "".join(("\\" + c if c in PROTECTABLES else c) for c in s)
124 return "".join(("\\" + c if c in PROTECTABLES else c) for c in s)
125 else:
125 else:
126 return s
126 return s
127
127
128
128
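The win32 branch of protect_filename above wraps the whole name in double quotes rather than backslash-escaping it, and PROTECTABLES on Windows is just the space character. A standalone restatement for illustration (assumes nothing beyond what the code above shows)::

    import sys

    PROTECTABLES_POSIX = ' ()[]{}?=\\|;:\'#*"^&'
    PROTECTABLES_WIN32 = ' '

    def protect_filename(s, win32=(sys.platform == 'win32')):
        # Mirror of the function above: quote on Windows, escape elsewhere.
        protectables = PROTECTABLES_WIN32 if win32 else PROTECTABLES_POSIX
        if not set(s) & set(protectables):
            return s
        if win32:
            return '"' + s + '"'
        return ''.join('\\' + c if c in protectables else c for c in s)

    print(protect_filename('My Documents', win32=False))  # My\ Documents
    print(protect_filename('My Documents', win32=True))   # "My Documents"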
129 def expand_user(path):
129 def expand_user(path):
130 """Expand '~'-style usernames in strings.
130 """Expand '~'-style usernames in strings.
131
131
132 This is similar to :func:`os.path.expanduser`, but it computes and returns
132 This is similar to :func:`os.path.expanduser`, but it computes and returns
133 extra information that will be useful if the input was being used in
133 extra information that will be useful if the input was being used in
134 computing completions, and you wish to return the completions with the
134 computing completions, and you wish to return the completions with the
135 original '~' instead of its expanded value.
135 original '~' instead of its expanded value.
136
136
137 Parameters
137 Parameters
138 ----------
138 ----------
139 path : str
139 path : str
140 String to be expanded. If no ~ is present, the output is the same as the
140 String to be expanded. If no ~ is present, the output is the same as the
141 input.
141 input.
142
142
143 Returns
143 Returns
144 -------
144 -------
145 newpath : str
145 newpath : str
146 Result of ~ expansion in the input path.
146 Result of ~ expansion in the input path.
147 tilde_expand : bool
147 tilde_expand : bool
148 Whether any expansion was performed or not.
148 Whether any expansion was performed or not.
149 tilde_val : str
149 tilde_val : str
150 The value that ~ was replaced with.
150 The value that ~ was replaced with.
151 """
151 """
152 # Default values
152 # Default values
153 tilde_expand = False
153 tilde_expand = False
154 tilde_val = ''
154 tilde_val = ''
155 newpath = path
155 newpath = path
156
156
157 if path.startswith('~'):
157 if path.startswith('~'):
158 tilde_expand = True
158 tilde_expand = True
159 rest = len(path)-1
159 rest = len(path)-1
160 newpath = os.path.expanduser(path)
160 newpath = os.path.expanduser(path)
161 if rest:
161 if rest:
162 tilde_val = newpath[:-rest]
162 tilde_val = newpath[:-rest]
163 else:
163 else:
164 tilde_val = newpath
164 tilde_val = newpath
165
165
166 return newpath, tilde_expand, tilde_val
166 return newpath, tilde_expand, tilde_val
167
167
168
168
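A quick illustration of the round trip between expand_user and compress_user (assumes IPython is installed so this module is importable; the expanded path shown in the comments is just an example home directory)::

    from IPython.core.completer import expand_user, compress_user

    newpath, did_expand, tilde_val = expand_user('~/notebooks')
    print(newpath, did_expand, tilde_val)
    # e.g. /home/alice/notebooks True /home/alice
    print(compress_user(newpath, did_expand, tilde_val))
    # ~/notebooks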
169 def compress_user(path, tilde_expand, tilde_val):
169 def compress_user(path, tilde_expand, tilde_val):
170 """Does the opposite of expand_user, with its outputs.
170 """Does the opposite of expand_user, with its outputs.
171 """
171 """
172 if tilde_expand:
172 if tilde_expand:
173 return path.replace(tilde_val, '~')
173 return path.replace(tilde_val, '~')
174 else:
174 else:
175 return path
175 return path
176
176
177
177
178 def completions_sorting_key(word):
178 def completions_sorting_key(word):
179 """key for sorting completions
179 """key for sorting completions
180
180
181 This does several things:
181 This does several things:
182
182
183 - Lowercase all completions, so they are sorted alphabetically with
183 - Lowercase all completions, so they are sorted alphabetically with
184 upper and lower case words mingled
184 upper and lower case words mingled
185 - Demote any completions starting with underscores to the end
185 - Demote any completions starting with underscores to the end
186 - Insert any %magic and %%cellmagic completions in the alphabetical order
186 - Insert any %magic and %%cellmagic completions in the alphabetical order
187 by their name
187 by their name
188 """
188 """
189 # Case insensitive sort
189 # Case insensitive sort
190 word = word.lower()
190 word = word.lower()
191
191
192 prio1, prio2 = 0, 0
192 prio1, prio2 = 0, 0
193
193
194 if word.startswith('__'):
194 if word.startswith('__'):
195 prio1 = 2
195 prio1 = 2
196 elif word.startswith('_'):
196 elif word.startswith('_'):
197 prio1 = 1
197 prio1 = 1
198
198
199 if word.endswith('='):
199 if word.endswith('='):
200 prio1 = -1
200 prio1 = -1
201
201
202 if word.startswith('%%'):
202 if word.startswith('%%'):
203 # If there's another % in there, this is something else, so leave it alone
203 # If there's another % in there, this is something else, so leave it alone
204 if not "%" in word[2:]:
204 if not "%" in word[2:]:
205 word = word[2:]
205 word = word[2:]
206 prio2 = 2
206 prio2 = 2
207 elif word.startswith('%'):
207 elif word.startswith('%'):
208 if not "%" in word[1:]:
208 if not "%" in word[1:]:
209 word = word[1:]
209 word = word[1:]
210 prio2 = 1
210 prio2 = 1
211
211
212 return prio1, word, prio2
212 return prio1, word, prio2
213
213
214
214
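The ordering rules above can be spot-checked directly (assumes IPython is installed so this module is importable)::

    from IPython.core.completer import completions_sorting_key

    words = ['zip', '_private', '__dunder__', '%%timeit', '%time', 'Abs', 'key=']
    print(sorted(words, key=completions_sorting_key))
    # ['key=', 'Abs', '%time', '%%timeit', 'zip', '_private', '__dunder__']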
215 @undoc
215 @undoc
216 class Bunch(object): pass
216 class Bunch(object): pass
217
217
218
218
219 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
219 if sys.platform == 'win32':
220 DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
221 else:
222 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
223
220 GREEDY_DELIMS = ' =\r\n'
224 GREEDY_DELIMS = ' =\r\n'
221
225
222
226
223 class CompletionSplitter(object):
227 class CompletionSplitter(object):
224 """An object to split an input line in a manner similar to readline.
228 """An object to split an input line in a manner similar to readline.
225
229
226 By having our own implementation, we can expose readline-like completion in
230 By having our own implementation, we can expose readline-like completion in
227 a uniform manner to all frontends. This object only needs to be given the
231 a uniform manner to all frontends. This object only needs to be given the
228 line of text to be split and the cursor position on said line, and it
232 line of text to be split and the cursor position on said line, and it
229 returns the 'word' to be completed on at the cursor after splitting the
233 returns the 'word' to be completed on at the cursor after splitting the
230 entire line.
234 entire line.
231
235
232 What characters are used as splitting delimiters can be controlled by
236 What characters are used as splitting delimiters can be controlled by
233 setting the `delims` attribute (this is a property that internally
237 setting the `delims` attribute (this is a property that internally
234 automatically builds the necessary regular expression)"""
238 automatically builds the necessary regular expression)"""
235
239
236 # Private interface
240 # Private interface
237
241
238 # A string of delimiter characters. The default value makes sense for
242 # A string of delimiter characters. The default value makes sense for
239 # IPython's most typical usage patterns.
243 # IPython's most typical usage patterns.
240 _delims = DELIMS
244 _delims = DELIMS
241
245
242 # The expression (a normal string) to be compiled into a regular expression
246 # The expression (a normal string) to be compiled into a regular expression
243 # for actual splitting. We store it as an attribute mostly for ease of
247 # for actual splitting. We store it as an attribute mostly for ease of
244 # debugging, since this type of code can be so tricky to debug.
248 # debugging, since this type of code can be so tricky to debug.
245 _delim_expr = None
249 _delim_expr = None
246
250
247 # The regular expression that does the actual splitting
251 # The regular expression that does the actual splitting
248 _delim_re = None
252 _delim_re = None
249
253
250 def __init__(self, delims=None):
254 def __init__(self, delims=None):
251 delims = CompletionSplitter._delims if delims is None else delims
255 delims = CompletionSplitter._delims if delims is None else delims
252 self.delims = delims
256 self.delims = delims
253
257
254 @property
258 @property
255 def delims(self):
259 def delims(self):
256 """Return the string of delimiter characters."""
260 """Return the string of delimiter characters."""
257 return self._delims
261 return self._delims
258
262
259 @delims.setter
263 @delims.setter
260 def delims(self, delims):
264 def delims(self, delims):
261 """Set the delimiters for line splitting."""
265 """Set the delimiters for line splitting."""
262 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
266 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
263 self._delim_re = re.compile(expr)
267 self._delim_re = re.compile(expr)
264 self._delims = delims
268 self._delims = delims
265 self._delim_expr = expr
269 self._delim_expr = expr
266
270
267 def split_line(self, line, cursor_pos=None):
271 def split_line(self, line, cursor_pos=None):
268 """Split a line of text with a cursor at the given position.
272 """Split a line of text with a cursor at the given position.
269 """
273 """
270 l = line if cursor_pos is None else line[:cursor_pos]
274 l = line if cursor_pos is None else line[:cursor_pos]
271 return self._delim_re.split(l)[-1]
275 return self._delim_re.split(l)[-1]
272
276
273
277
274 class Completer(Configurable):
278 class Completer(Configurable):
275
279
276 greedy = Bool(False,
280 greedy = Bool(False,
277 help="""Activate greedy completion
281 help="""Activate greedy completion
278 PENDING DEPRECATION. This is now mostly taken care of with Jedi.
282 PENDING DEPRECATION. This is now mostly taken care of with Jedi.
279
283
280 This will enable completion on elements of lists, results of function calls, etc.,
284 This will enable completion on elements of lists, results of function calls, etc.,
281 but can be unsafe because the code is actually evaluated on TAB.
285 but can be unsafe because the code is actually evaluated on TAB.
282 """
286 """
283 ).tag(config=True)
287 ).tag(config=True)
284
288
285
289
286 def __init__(self, namespace=None, global_namespace=None, **kwargs):
290 def __init__(self, namespace=None, global_namespace=None, **kwargs):
287 """Create a new completer for the command line.
291 """Create a new completer for the command line.
288
292
289 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
293 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
290
294
291 If unspecified, the default namespace where completions are performed
295 If unspecified, the default namespace where completions are performed
292 is __main__ (technically, __main__.__dict__). Namespaces should be
296 is __main__ (technically, __main__.__dict__). Namespaces should be
293 given as dictionaries.
297 given as dictionaries.
294
298
295 An optional second namespace can be given. This allows the completer
299 An optional second namespace can be given. This allows the completer
296 to handle cases where both the local and global scopes need to be
300 to handle cases where both the local and global scopes need to be
297 distinguished.
301 distinguished.
298
302
299 Completer instances should be used as the completion mechanism of
303 Completer instances should be used as the completion mechanism of
300 readline via the set_completer() call:
304 readline via the set_completer() call:
301
305
302 readline.set_completer(Completer(my_namespace).complete)
306 readline.set_completer(Completer(my_namespace).complete)
303 """
307 """
304
308
305 # Don't bind to namespace quite yet, but flag whether the user wants a
309 # Don't bind to namespace quite yet, but flag whether the user wants a
306 # specific namespace or to use __main__.__dict__. This will allow us
310 # specific namespace or to use __main__.__dict__. This will allow us
307 # to bind to __main__.__dict__ at completion time, not now.
311 # to bind to __main__.__dict__ at completion time, not now.
308 if namespace is None:
312 if namespace is None:
309 self.use_main_ns = 1
313 self.use_main_ns = 1
310 else:
314 else:
311 self.use_main_ns = 0
315 self.use_main_ns = 0
312 self.namespace = namespace
316 self.namespace = namespace
313
317
314 # The global namespace, if given, can be bound directly
318 # The global namespace, if given, can be bound directly
315 if global_namespace is None:
319 if global_namespace is None:
316 self.global_namespace = {}
320 self.global_namespace = {}
317 else:
321 else:
318 self.global_namespace = global_namespace
322 self.global_namespace = global_namespace
319
323
320 super(Completer, self).__init__(**kwargs)
324 super(Completer, self).__init__(**kwargs)
321
325
322 def complete(self, text, state):
326 def complete(self, text, state):
323 """Return the next possible completion for 'text'.
327 """Return the next possible completion for 'text'.
324
328
325 This is called successively with state == 0, 1, 2, ... until it
329 This is called successively with state == 0, 1, 2, ... until it
326 returns None. The completion should begin with 'text'.
330 returns None. The completion should begin with 'text'.
327
331
328 """
332 """
329 if self.use_main_ns:
333 if self.use_main_ns:
330 self.namespace = __main__.__dict__
334 self.namespace = __main__.__dict__
331
335
332 if state == 0:
336 if state == 0:
333 if "." in text:
337 if "." in text:
334 self.matches = self.attr_matches(text)
338 self.matches = self.attr_matches(text)
335 else:
339 else:
336 self.matches = self.global_matches(text)
340 self.matches = self.global_matches(text)
337 try:
341 try:
338 return self.matches[state]
342 return self.matches[state]
339 except IndexError:
343 except IndexError:
340 return None
344 return None
341
345
342 def global_matches(self, text):
346 def global_matches(self, text):
343 """Compute matches when text is a simple name.
347 """Compute matches when text is a simple name.
344
348
345 Return a list of all keywords, built-in functions and names currently
349 Return a list of all keywords, built-in functions and names currently
346 defined in self.namespace or self.global_namespace that match.
350 defined in self.namespace or self.global_namespace that match.
347
351
348 """
352 """
349 matches = []
353 matches = []
350 match_append = matches.append
354 match_append = matches.append
351 n = len(text)
355 n = len(text)
352 for lst in [keyword.kwlist,
356 for lst in [keyword.kwlist,
353 builtin_mod.__dict__.keys(),
357 builtin_mod.__dict__.keys(),
354 self.namespace.keys(),
358 self.namespace.keys(),
355 self.global_namespace.keys()]:
359 self.global_namespace.keys()]:
356 for word in lst:
360 for word in lst:
357 if word[:n] == text and word != "__builtins__":
361 if word[:n] == text and word != "__builtins__":
358 match_append(word)
362 match_append(word)
359 return [cast_unicode_py2(m) for m in matches]
363 return [cast_unicode_py2(m) for m in matches]
360
364
361 def attr_matches(self, text):
365 def attr_matches(self, text):
362 """Compute matches when text contains a dot.
366 """Compute matches when text contains a dot.
363
367
364 Assuming the text is of the form NAME.NAME....[NAME], and is
368 Assuming the text is of the form NAME.NAME....[NAME], and is
365 evaluatable in self.namespace or self.global_namespace, it will be
369 evaluatable in self.namespace or self.global_namespace, it will be
366 evaluated and its attributes (as revealed by dir()) are used as
370 evaluated and its attributes (as revealed by dir()) are used as
367 possible completions. (For class instances, class members are
371 possible completions. (For class instances, class members are
368 also considered.)
372 also considered.)
369
373
370 WARNING: this can still invoke arbitrary C code, if an object
374 WARNING: this can still invoke arbitrary C code, if an object
371 with a __getattr__ hook is evaluated.
375 with a __getattr__ hook is evaluated.
372
376
373 """
377 """
374
378
375 # Another option, seems to work great. Catches things like ''.<tab>
379 # Another option, seems to work great. Catches things like ''.<tab>
376 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
380 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
377
381
378 if m:
382 if m:
379 expr, attr = m.group(1, 3)
383 expr, attr = m.group(1, 3)
380 elif self.greedy:
384 elif self.greedy:
381 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
385 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
382 if not m2:
386 if not m2:
383 return []
387 return []
384 expr, attr = m2.group(1,2)
388 expr, attr = m2.group(1,2)
385 else:
389 else:
386 return []
390 return []
387
391
388 try:
392 try:
389 obj = eval(expr, self.namespace)
393 obj = eval(expr, self.namespace)
390 except:
394 except:
391 try:
395 try:
392 obj = eval(expr, self.global_namespace)
396 obj = eval(expr, self.global_namespace)
393 except:
397 except:
394 return []
398 return []
395
399
396 if self.limit_to__all__ and hasattr(obj, '__all__'):
400 if self.limit_to__all__ and hasattr(obj, '__all__'):
397 words = get__all__entries(obj)
401 words = get__all__entries(obj)
398 else:
402 else:
399 words = dir2(obj)
403 words = dir2(obj)
400
404
401 try:
405 try:
402 words = generics.complete_object(obj, words)
406 words = generics.complete_object(obj, words)
403 except TryNext:
407 except TryNext:
404 pass
408 pass
405 except Exception:
409 except Exception:
406 # Silence errors from completion function
410 # Silence errors from completion function
407 #raise # dbg
411 #raise # dbg
408 pass
412 pass
409 # Build match list to return
413 # Build match list to return
410 n = len(attr)
414 n = len(attr)
411 return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr ]
415 return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr ]
412
416
413
417
414 def get__all__entries(obj):
418 def get__all__entries(obj):
415 """returns the strings in the __all__ attribute"""
419 """returns the strings in the __all__ attribute"""
416 try:
420 try:
417 words = getattr(obj, '__all__')
421 words = getattr(obj, '__all__')
418 except:
422 except:
419 return []
423 return []
420
424
421 return [cast_unicode_py2(w) for w in words if isinstance(w, string_types)]
425 return [cast_unicode_py2(w) for w in words if isinstance(w, string_types)]
422
426
423
427
424 def match_dict_keys(keys, prefix, delims):
428 def match_dict_keys(keys, prefix, delims):
425 """Used by dict_key_matches, matching the prefix to a list of keys"""
429 """Used by dict_key_matches, matching the prefix to a list of keys"""
426 if not prefix:
430 if not prefix:
427 return None, 0, [repr(k) for k in keys
431 return None, 0, [repr(k) for k in keys
428 if isinstance(k, (string_types, bytes))]
432 if isinstance(k, (string_types, bytes))]
429 quote_match = re.search('["\']', prefix)
433 quote_match = re.search('["\']', prefix)
430 quote = quote_match.group()
434 quote = quote_match.group()
431 try:
435 try:
432 prefix_str = eval(prefix + quote, {})
436 prefix_str = eval(prefix + quote, {})
433 except Exception:
437 except Exception:
434 return None, 0, []
438 return None, 0, []
435
439
436 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
440 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
437 token_match = re.search(pattern, prefix, re.UNICODE)
441 token_match = re.search(pattern, prefix, re.UNICODE)
438 token_start = token_match.start()
442 token_start = token_match.start()
439 token_prefix = token_match.group()
443 token_prefix = token_match.group()
440
444
441 # TODO: support bytes in Py3k
445 # TODO: support bytes in Py3k
442 matched = []
446 matched = []
443 for key in keys:
447 for key in keys:
444 try:
448 try:
445 if not key.startswith(prefix_str):
449 if not key.startswith(prefix_str):
446 continue
450 continue
447 except (AttributeError, TypeError, UnicodeError):
451 except (AttributeError, TypeError, UnicodeError):
448 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
452 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
449 continue
453 continue
450
454
451 # reformat remainder of key to begin with prefix
455 # reformat remainder of key to begin with prefix
452 rem = key[len(prefix_str):]
456 rem = key[len(prefix_str):]
453 # force repr wrapped in '
457 # force repr wrapped in '
454 rem_repr = repr(rem + '"')
458 rem_repr = repr(rem + '"')
455 if rem_repr.startswith('u') and prefix[0] not in 'uU':
459 if rem_repr.startswith('u') and prefix[0] not in 'uU':
456 # Found key is unicode, but prefix is Py2 string.
460 # Found key is unicode, but prefix is Py2 string.
457 # Therefore attempt to interpret key as string.
461 # Therefore attempt to interpret key as string.
458 try:
462 try:
459 rem_repr = repr(rem.encode('ascii') + '"')
463 rem_repr = repr(rem.encode('ascii') + '"')
460 except UnicodeEncodeError:
464 except UnicodeEncodeError:
461 continue
465 continue
462
466
463 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
467 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
464 if quote == '"':
468 if quote == '"':
465 # The entered prefix is quoted with ",
469 # The entered prefix is quoted with ",
466 # but the match is quoted with '.
470 # but the match is quoted with '.
467 # A contained " hence needs escaping for comparison:
471 # A contained " hence needs escaping for comparison:
468 rem_repr = rem_repr.replace('"', '\\"')
472 rem_repr = rem_repr.replace('"', '\\"')
469
473
470 # then reinsert prefix from start of token
474 # then reinsert prefix from start of token
471 matched.append('%s%s' % (token_prefix, rem_repr))
475 matched.append('%s%s' % (token_prefix, rem_repr))
472 return quote, token_start, matched
476 return quote, token_start, matched
473
477
474
478
475 def _safe_isinstance(obj, module, class_name):
479 def _safe_isinstance(obj, module, class_name):
476 """Checks if obj is an instance of module.class_name if loaded
480 """Checks if obj is an instance of module.class_name if loaded
477 """
481 """
478 return (module in sys.modules and
482 return (module in sys.modules and
479 isinstance(obj, getattr(__import__(module), class_name)))
483 isinstance(obj, getattr(__import__(module), class_name)))
480
484
481
485
482 def back_unicode_name_matches(text):
486 def back_unicode_name_matches(text):
483 u"""Match unicode characters back to unicode name
487 u"""Match unicode characters back to unicode name
484
488
485 This does β˜ƒ -> \\snowman
489 This does β˜ƒ -> \\snowman
486
490
487 Note that snowman is not a valid python3 combining character but will be expanded.
491 Note that snowman is not a valid python3 combining character but will be expanded.
488 Though it will not be recombined back into the snowman character by the completion machinery.
492 Though it will not be recombined back into the snowman character by the completion machinery.
489
493
490 Nor will this back-complete standard sequences like \\n, \\b ...
494 Nor will this back-complete standard sequences like \\n, \\b ...
491
495
492 Used on Python 3 only.
496 Used on Python 3 only.
493 """
497 """
494 if len(text)<2:
498 if len(text)<2:
495 return u'', ()
499 return u'', ()
496 maybe_slash = text[-2]
500 maybe_slash = text[-2]
497 if maybe_slash != '\\':
501 if maybe_slash != '\\':
498 return u'', ()
502 return u'', ()
499
503
500 char = text[-1]
504 char = text[-1]
501 # no expand on quote for completion in strings.
505 # no expand on quote for completion in strings.
502 # nor backcomplete standard ascii keys
506 # nor backcomplete standard ascii keys
503 if char in string.ascii_letters or char in ['"',"'"]:
507 if char in string.ascii_letters or char in ['"',"'"]:
504 return u'', ()
508 return u'', ()
505 try :
509 try :
506 unic = unicodedata.name(char)
510 unic = unicodedata.name(char)
507 return '\\'+char,['\\'+unic]
511 return '\\'+char,['\\'+unic]
508 except KeyError:
512 except KeyError:
509 pass
513 pass
510 return u'', ()
514 return u'', ()
511
515
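For example, with a backslash and a snowman already on the line (assumes IPython is installed so this module is importable)::

    from IPython.core.completer import back_unicode_name_matches

    print(back_unicode_name_matches(u'print("\\β˜ƒ'))
    # ('\\β˜ƒ', ['\\SNOWMAN'])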
512 def back_latex_name_matches(text):
516 def back_latex_name_matches(text):
513 u"""Match latex characters back to unicode name
517 u"""Match latex characters back to unicode name
514
518
515 This does ->\\sqrt
519 This does ->\\sqrt
516
520
517 Used on Python 3 only.
521 Used on Python 3 only.
518 """
522 """
519 if len(text)<2:
523 if len(text)<2:
520 return u'', ()
524 return u'', ()
521 maybe_slash = text[-2]
525 maybe_slash = text[-2]
522 if maybe_slash != '\\':
526 if maybe_slash != '\\':
523 return u'', ()
527 return u'', ()
524
528
525
529
526 char = text[-1]
530 char = text[-1]
527 # no expand on quote for completion in strings.
531 # no expand on quote for completion in strings.
528 # nor backcomplete standard ascii keys
532 # nor backcomplete standard ascii keys
529 if char in string.ascii_letters or char in ['"',"'"]:
533 if char in string.ascii_letters or char in ['"',"'"]:
530 return u'', ()
534 return u'', ()
531 try :
535 try :
532 latex = reverse_latex_symbol[char]
536 latex = reverse_latex_symbol[char]
533 # '\\' replace the \ as well
537 # '\\' replace the \ as well
534 return '\\'+char,[latex]
538 return '\\'+char,[latex]
535 except KeyError:
539 except KeyError:
536 pass
540 pass
537 return u'', ()
541 return u'', ()
538
542
539
543
540 class IPCompleter(Completer):
544 class IPCompleter(Completer):
541 """Extension of the completer class with IPython-specific features"""
545 """Extension of the completer class with IPython-specific features"""
542
546
543 @observe('greedy')
547 @observe('greedy')
544 def _greedy_changed(self, change):
548 def _greedy_changed(self, change):
545 """update the splitter and readline delims when greedy is changed"""
549 """update the splitter and readline delims when greedy is changed"""
546 if change['new']:
550 if change['new']:
547 self.splitter.delims = GREEDY_DELIMS
551 self.splitter.delims = GREEDY_DELIMS
548 else:
552 else:
549 self.splitter.delims = DELIMS
553 self.splitter.delims = DELIMS
550
554
551 if self.readline:
555 if self.readline:
552 self.readline.set_completer_delims(self.splitter.delims)
556 self.readline.set_completer_delims(self.splitter.delims)
553
557
554 merge_completions = Bool(True,
558 merge_completions = Bool(True,
555 help="""Whether to merge completion results into a single list
559 help="""Whether to merge completion results into a single list
556
560
557 If False, only the completion results from the first non-empty
561 If False, only the completion results from the first non-empty
558 completer will be returned.
562 completer will be returned.
559 """
563 """
560 ).tag(config=True)
564 ).tag(config=True)
561 omit__names = Enum((0,1,2), default_value=2,
565 omit__names = Enum((0,1,2), default_value=2,
562 help="""Instruct the completer to omit private method names
566 help="""Instruct the completer to omit private method names
563
567
564 Specifically, when completing on ``object.<tab>``.
568 Specifically, when completing on ``object.<tab>``.
565
569
566 When 2 [default]: all names that start with '_' will be excluded.
570 When 2 [default]: all names that start with '_' will be excluded.
567
571
568 When 1: all 'magic' names (``__foo__``) will be excluded.
572 When 1: all 'magic' names (``__foo__``) will be excluded.
569
573
570 When 0: nothing will be excluded.
574 When 0: nothing will be excluded.
571 """
575 """
572 ).tag(config=True)
576 ).tag(config=True)
573 limit_to__all__ = Bool(False,
577 limit_to__all__ = Bool(False,
574 help="""
578 help="""
575 DEPRECATED as of version 5.0.
579 DEPRECATED as of version 5.0.
576
580
577 Instruct the completer to use __all__ for the completion
581 Instruct the completer to use __all__ for the completion
578
582
579 Specifically, when completing on ``object.<tab>``.
583 Specifically, when completing on ``object.<tab>``.
580
584
581 When True: only those names in obj.__all__ will be included.
585 When True: only those names in obj.__all__ will be included.
582
586
583 When False [default]: the __all__ attribute is ignored
587 When False [default]: the __all__ attribute is ignored
584 """,
588 """,
585 ).tag(config=True)
589 ).tag(config=True)
586
590
587 def __init__(self, shell=None, namespace=None, global_namespace=None,
591 def __init__(self, shell=None, namespace=None, global_namespace=None,
588 use_readline=True, config=None, **kwargs):
592 use_readline=True, config=None, **kwargs):
589 """IPCompleter() -> completer
593 """IPCompleter() -> completer
590
594
591 Return a completer object suitable for use by the readline library
595 Return a completer object suitable for use by the readline library
592 via readline.set_completer().
596 via readline.set_completer().
593
597
594 Inputs:
598 Inputs:
595
599
596 - shell: a pointer to the ipython shell itself. This is needed
600 - shell: a pointer to the ipython shell itself. This is needed
597 because this completer knows about magic functions, and those can
601 because this completer knows about magic functions, and those can
598 only be accessed via the ipython instance.
602 only be accessed via the ipython instance.
599
603
600 - namespace: an optional dict where completions are performed.
604 - namespace: an optional dict where completions are performed.
601
605
602 - global_namespace: secondary optional dict for completions, to
606 - global_namespace: secondary optional dict for completions, to
603 handle cases (such as IPython embedded inside functions) where
607 handle cases (such as IPython embedded inside functions) where
604 both Python scopes are visible.
608 both Python scopes are visible.
605
609
606 use_readline : bool, optional
610 use_readline : bool, optional
607 If true, use the readline library. This completer can still function
611 If true, use the readline library. This completer can still function
608 without readline, though in that case callers must provide some extra
612 without readline, though in that case callers must provide some extra
609 information on each call about the current line."""
613 information on each call about the current line."""
610
614
611 self.magic_escape = ESC_MAGIC
615 self.magic_escape = ESC_MAGIC
612 self.splitter = CompletionSplitter()
616 self.splitter = CompletionSplitter()
613
617
614 # Readline configuration, only used by the rlcompleter method.
618 # Readline configuration, only used by the rlcompleter method.
615 if use_readline:
619 if use_readline:
616 # We store the right version of readline so that later code can use it.
620 # We store the right version of readline so that later code can use it.
617 import IPython.utils.rlineimpl as readline
621 import IPython.utils.rlineimpl as readline
618 self.readline = readline
622 self.readline = readline
619 else:
623 else:
620 self.readline = None
624 self.readline = None
621
625
622 # _greedy_changed() depends on splitter and readline being defined:
626 # _greedy_changed() depends on splitter and readline being defined:
623 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
627 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
624 config=config, **kwargs)
628 config=config, **kwargs)
625
629
626 # List where completion matches will be stored
630 # List where completion matches will be stored
627 self.matches = []
631 self.matches = []
628 self.shell = shell
632 self.shell = shell
629 # Regexp to split filenames with spaces in them
633 # Regexp to split filenames with spaces in them
630 self.space_name_re = re.compile(r'([^\\] )')
634 self.space_name_re = re.compile(r'([^\\] )')
631 # Hold a local ref. to glob.glob for speed
635 # Hold a local ref. to glob.glob for speed
632 self.glob = glob.glob
636 self.glob = glob.glob
633
637
634 # Determine if we are running on 'dumb' terminals, like (X)Emacs
638 # Determine if we are running on 'dumb' terminals, like (X)Emacs
635 # buffers, to avoid completion problems.
639 # buffers, to avoid completion problems.
636 term = os.environ.get('TERM','xterm')
640 term = os.environ.get('TERM','xterm')
637 self.dumb_terminal = term in ['dumb','emacs']
641 self.dumb_terminal = term in ['dumb','emacs']
638
642
639 # Special handling of backslashes needed in win32 platforms
643 # Special handling of backslashes needed in win32 platforms
640 if sys.platform == "win32":
644 if sys.platform == "win32":
641 self.clean_glob = self._clean_glob_win32
645 self.clean_glob = self._clean_glob_win32
642 else:
646 else:
643 self.clean_glob = self._clean_glob
647 self.clean_glob = self._clean_glob
644
648
645 #regexp to parse docstring for function signature
649 #regexp to parse docstring for function signature
646 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
650 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
647 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
651 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
648 #use this if positional argument name is also needed
652 #use this if positional argument name is also needed
649 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
653 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
650
654
651 # All active matcher routines for completion
655 # All active matcher routines for completion
652 self.matchers = [
656 self.matchers = [
653 self.python_matches,
657 self.python_matches,
654 self.file_matches,
658 self.file_matches,
655 self.magic_matches,
659 self.magic_matches,
656 self.python_func_kw_matches,
660 self.python_func_kw_matches,
657 self.dict_key_matches,
661 self.dict_key_matches,
658 ]
662 ]
659
663
660 # This is set externally by InteractiveShell
664 # This is set externally by InteractiveShell
661 self.custom_completers = None
665 self.custom_completers = None
662
666
663 def all_completions(self, text):
667 def all_completions(self, text):
664 """
668 """
665 Wrapper around the complete method for the benefit of emacs.
669 Wrapper around the complete method for the benefit of emacs.
666 """
670 """
667 return self.complete(text)[1]
671 return self.complete(text)[1]
668
672
669 def _clean_glob(self, text):
673 def _clean_glob(self, text):
670 return self.glob("%s*" % text)
674 return self.glob("%s*" % text)
671
675
672 def _clean_glob_win32(self,text):
676 def _clean_glob_win32(self,text):
673 return [f.replace("\\","/")
677 return [f.replace("\\","/")
674 for f in self.glob("%s*" % text)]
678 for f in self.glob("%s*" % text)]
675
679
676 def file_matches(self, text):
680 def file_matches(self, text):
677 """Match filenames, expanding ~USER type strings.
681 """Match filenames, expanding ~USER type strings.
678
682
679 Most of the seemingly convoluted logic in this completer is an
683 Most of the seemingly convoluted logic in this completer is an
680 attempt to handle filenames with spaces in them. And yet it's not
684 attempt to handle filenames with spaces in them. And yet it's not
681 quite perfect, because Python's readline doesn't expose all of the
685 quite perfect, because Python's readline doesn't expose all of the
682 GNU readline details needed for this to be done correctly.
686 GNU readline details needed for this to be done correctly.
683
687
684 For a filename with a space in it, the printed completions will be
688 For a filename with a space in it, the printed completions will be
685 only the parts after what's already been typed (instead of the
689 only the parts after what's already been typed (instead of the
686 full completions, as is normally done). I don't think with the
690 full completions, as is normally done). I don't think with the
687 current (as of Python 2.3) Python readline it's possible to do
691 current (as of Python 2.3) Python readline it's possible to do
688 better."""
692 better."""
689
693
690 # chars that require escaping with backslash - i.e. chars
694 # chars that require escaping with backslash - i.e. chars
691 # that readline treats incorrectly as delimiters, but we
695 # that readline treats incorrectly as delimiters, but we
692 # don't want to treat as delimiters in filename matching
696 # don't want to treat as delimiters in filename matching
693 # when escaped with backslash
697 # when escaped with backslash
694 if text.startswith('!'):
698 if text.startswith('!'):
695 text = text[1:]
699 text = text[1:]
696 text_prefix = u'!'
700 text_prefix = u'!'
697 else:
701 else:
698 text_prefix = u''
702 text_prefix = u''
699
703
700 text_until_cursor = self.text_until_cursor
704 text_until_cursor = self.text_until_cursor
701 # track strings with open quotes
705 # track strings with open quotes
702 open_quotes = has_open_quotes(text_until_cursor)
706 open_quotes = has_open_quotes(text_until_cursor)
703
707
704 if '(' in text_until_cursor or '[' in text_until_cursor:
708 if '(' in text_until_cursor or '[' in text_until_cursor:
705 lsplit = text
709 lsplit = text
706 else:
710 else:
707 try:
711 try:
708 # arg_split ~ shlex.split, but with unicode bugs fixed by us
712 # arg_split ~ shlex.split, but with unicode bugs fixed by us
709 lsplit = arg_split(text_until_cursor)[-1]
713 lsplit = arg_split(text_until_cursor)[-1]
710 except ValueError:
714 except ValueError:
711 # typically an unmatched ", or backslash without escaped char.
715 # typically an unmatched ", or backslash without escaped char.
712 if open_quotes:
716 if open_quotes:
713 lsplit = text_until_cursor.split(open_quotes)[-1]
717 lsplit = text_until_cursor.split(open_quotes)[-1]
714 else:
718 else:
715 return []
719 return []
716 except IndexError:
720 except IndexError:
717 # tab pressed on empty line
721 # tab pressed on empty line
718 lsplit = ""
722 lsplit = ""
719
723
720 if not open_quotes and lsplit != protect_filename(lsplit):
724 if not open_quotes and lsplit != protect_filename(lsplit):
721 # if protectables are found, do matching on the whole escaped name
725 # if protectables are found, do matching on the whole escaped name
722 has_protectables = True
726 has_protectables = True
723 text0,text = text,lsplit
727 text0,text = text,lsplit
724 else:
728 else:
725 has_protectables = False
729 has_protectables = False
726 text = os.path.expanduser(text)
730 text = os.path.expanduser(text)
727
731
728 if text == "":
732 if text == "":
729 return [text_prefix + cast_unicode_py2(protect_filename(f)) for f in self.glob("*")]
733 return [text_prefix + cast_unicode_py2(protect_filename(f)) for f in self.glob("*")]
730
734
731 # Compute the matches from the filesystem
735 # Compute the matches from the filesystem
732 m0 = self.clean_glob(text.replace('\\',''))
736 if sys.platform == 'win32':
737 m0 = self.clean_glob(text)
738 else:
739 m0 = self.clean_glob(text.replace('\\', ''))
733
740
734 if has_protectables:
741 if has_protectables:
735 # If we had protectables, we need to revert our changes to the
742 # If we had protectables, we need to revert our changes to the
736 # beginning of filename so that we don't double-write the part
743 # beginning of filename so that we don't double-write the part
737 # of the filename we have so far
744 # of the filename we have so far
738 len_lsplit = len(lsplit)
745 len_lsplit = len(lsplit)
739 matches = [text_prefix + text0 +
746 matches = [text_prefix + text0 +
740 protect_filename(f[len_lsplit:]) for f in m0]
747 protect_filename(f[len_lsplit:]) for f in m0]
741 else:
748 else:
742 if open_quotes:
749 if open_quotes:
743 # if we have a string with an open quote, we don't need to
750 # if we have a string with an open quote, we don't need to
744 # protect the names at all (and we _shouldn't_, as it
751 # protect the names at all (and we _shouldn't_, as it
745 # would cause bugs when the filesystem call is made).
752 # would cause bugs when the filesystem call is made).
746 matches = m0
753 matches = m0
747 else:
754 else:
748 matches = [text_prefix +
755 matches = [text_prefix +
749 protect_filename(f) for f in m0]
756 protect_filename(f) for f in m0]
750
757
751 # Mark directories in input list by appending '/' to their names.
758 # Mark directories in input list by appending '/' to their names.
752 return [cast_unicode_py2(x+'/') if os.path.isdir(x) else x for x in matches]
759 return [cast_unicode_py2(x+'/') if os.path.isdir(x) else x for x in matches]
753
760
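On the globbing side, the new win32 branch in file_matches above passes the typed text to glob unchanged, where previously every backslash was stripped first and a path like C:\Users\foo became C:Usersfoo. A standalone sketch of the difference (hypothetical path, for illustration only)::

    import glob

    def old_pattern(text):
        # Previous behavior: strip all backslashes before globbing.
        return text.replace('\\', '') + '*'

    def new_pattern_win32(text):
        # New win32 behavior: glob the text exactly as typed.
        return text + '*'

    typed = r'C:\Users\fo'
    print(old_pattern(typed))         # C:Usersfo*   -> matches nothing useful
    print(new_pattern_win32(typed))   # C:\Users\fo* -> a real path prefix
    print(glob.glob(new_pattern_win32(typed)))  # actual matches, on a Windows machine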
754 def magic_matches(self, text):
761 def magic_matches(self, text):
755 """Match magics"""
762 """Match magics"""
756 # Get all shell magics now rather than statically, so magics loaded at
763 # Get all shell magics now rather than statically, so magics loaded at
757 # runtime show up too.
764 # runtime show up too.
758 lsm = self.shell.magics_manager.lsmagic()
765 lsm = self.shell.magics_manager.lsmagic()
759 line_magics = lsm['line']
766 line_magics = lsm['line']
760 cell_magics = lsm['cell']
767 cell_magics = lsm['cell']
761 pre = self.magic_escape
768 pre = self.magic_escape
762 pre2 = pre+pre
769 pre2 = pre+pre
763
770
764 # Completion logic:
771 # Completion logic:
765 # - user gives %%: only do cell magics
772 # - user gives %%: only do cell magics
766 # - user gives %: do both line and cell magics
773 # - user gives %: do both line and cell magics
767 # - no prefix: do both
774 # - no prefix: do both
768 # In other words, line magics are skipped if the user gives %% explicitly
775 # In other words, line magics are skipped if the user gives %% explicitly
769 bare_text = text.lstrip(pre)
776 bare_text = text.lstrip(pre)
770 comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
777 comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
771 if not text.startswith(pre2):
778 if not text.startswith(pre2):
772 comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
779 comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
773 return [cast_unicode_py2(c) for c in comp]
780 return [cast_unicode_py2(c) for c in comp]
774
781
775
782
776 def python_matches(self, text):
783 def python_matches(self, text):
777 """Match attributes or global python names"""
784 """Match attributes or global python names"""
778 if "." in text:
785 if "." in text:
779 try:
786 try:
780 matches = self.attr_matches(text)
787 matches = self.attr_matches(text)
781 if text.endswith('.') and self.omit__names:
788 if text.endswith('.') and self.omit__names:
782 if self.omit__names == 1:
789 if self.omit__names == 1:
783 # true if txt is _not_ a __ name, false otherwise:
790 # true if txt is _not_ a __ name, false otherwise:
784 no__name = (lambda txt:
791 no__name = (lambda txt:
785 re.match(r'.*\.__.*?__',txt) is None)
792 re.match(r'.*\.__.*?__',txt) is None)
786 else:
793 else:
787 # true if txt is _not_ a _ name, false otherwise:
794 # true if txt is _not_ a _ name, false otherwise:
788 no__name = (lambda txt:
795 no__name = (lambda txt:
789 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
796 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
790 matches = filter(no__name, matches)
797 matches = filter(no__name, matches)
791 except NameError:
798 except NameError:
792 # catches <undefined attributes>.<tab>
799 # catches <undefined attributes>.<tab>
793 matches = []
800 matches = []
794 else:
801 else:
795 matches = self.global_matches(text)
802 matches = self.global_matches(text)
796 return matches
803 return matches
797
804
798 def _default_arguments_from_docstring(self, doc):
805 def _default_arguments_from_docstring(self, doc):
799 """Parse the first line of docstring for call signature.
806 """Parse the first line of docstring for call signature.
800
807
801 Docstring should be of the form 'min(iterable[, key=func])\n'.
808 Docstring should be of the form 'min(iterable[, key=func])\n'.
802 It can also parse cython docstring of the form
809 It can also parse cython docstring of the form
803 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
810 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
804 """
811 """
805 if doc is None:
812 if doc is None:
806 return []
813 return []
807
814
808 #care only about the first line
815 #care only about the first line
809 line = doc.lstrip().splitlines()[0]
816 line = doc.lstrip().splitlines()[0]
810
817
811 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
818 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
812 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
819 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
813 sig = self.docstring_sig_re.search(line)
820 sig = self.docstring_sig_re.search(line)
814 if sig is None:
821 if sig is None:
815 return []
822 return []
816 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
823 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
817 sig = sig.groups()[0].split(',')
824 sig = sig.groups()[0].split(',')
818 ret = []
825 ret = []
819 for s in sig:
826 for s in sig:
820 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
827 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
821 ret += self.docstring_kwd_re.findall(s)
828 ret += self.docstring_kwd_re.findall(s)
822 return ret
829 return ret
823
830
824 def _default_arguments(self, obj):
831 def _default_arguments(self, obj):
825 """Return the list of default arguments of obj if it is callable,
832 """Return the list of default arguments of obj if it is callable,
826 or empty list otherwise."""
833 or empty list otherwise."""
827 call_obj = obj
834 call_obj = obj
828 ret = []
835 ret = []
829 if inspect.isbuiltin(obj):
836 if inspect.isbuiltin(obj):
830 pass
837 pass
831 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
838 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
832 if inspect.isclass(obj):
839 if inspect.isclass(obj):
833 #for cython embedsignature=True the constructor docstring
840 #for cython embedsignature=True the constructor docstring
834 #belongs to the object itself not __init__
841 #belongs to the object itself not __init__
835 ret += self._default_arguments_from_docstring(
842 ret += self._default_arguments_from_docstring(
836 getattr(obj, '__doc__', ''))
843 getattr(obj, '__doc__', ''))
837 # for classes, check for __init__,__new__
844 # for classes, check for __init__,__new__
838 call_obj = (getattr(obj, '__init__', None) or
845 call_obj = (getattr(obj, '__init__', None) or
839 getattr(obj, '__new__', None))
846 getattr(obj, '__new__', None))
840 # for all others, check if they are __call__able
847 # for all others, check if they are __call__able
841 elif hasattr(obj, '__call__'):
848 elif hasattr(obj, '__call__'):
842 call_obj = obj.__call__
849 call_obj = obj.__call__
843 ret += self._default_arguments_from_docstring(
850 ret += self._default_arguments_from_docstring(
844 getattr(call_obj, '__doc__', ''))
851 getattr(call_obj, '__doc__', ''))
845
852
846 if PY3:
853 if PY3:
847 _keeps = (inspect.Parameter.KEYWORD_ONLY,
854 _keeps = (inspect.Parameter.KEYWORD_ONLY,
848 inspect.Parameter.POSITIONAL_OR_KEYWORD)
855 inspect.Parameter.POSITIONAL_OR_KEYWORD)
849 signature = inspect.signature
856 signature = inspect.signature
850 else:
857 else:
851 import IPython.utils.signatures
858 import IPython.utils.signatures
852 _keeps = (IPython.utils.signatures.Parameter.KEYWORD_ONLY,
859 _keeps = (IPython.utils.signatures.Parameter.KEYWORD_ONLY,
853 IPython.utils.signatures.Parameter.POSITIONAL_OR_KEYWORD)
860 IPython.utils.signatures.Parameter.POSITIONAL_OR_KEYWORD)
854 signature = IPython.utils.signatures.signature
861 signature = IPython.utils.signatures.signature
855
862
856 try:
863 try:
857 sig = signature(call_obj)
864 sig = signature(call_obj)
858 ret.extend(k for k, v in sig.parameters.items() if
865 ret.extend(k for k, v in sig.parameters.items() if
859 v.kind in _keeps)
866 v.kind in _keeps)
860 except ValueError:
867 except ValueError:
861 pass
868 pass
862
869
863 return list(set(ret))
870 return list(set(ret))
864
871
    def python_func_kw_matches(self, text):
        """Match named parameters (kwargs) of the last open function"""

        if "." in text: # a parameter cannot be dotted
            return []
        try: regexp = self.__funcParamsRegex
        except AttributeError:
            regexp = self.__funcParamsRegex = re.compile(r'''
                '.*?(?<!\\)' |    # single quoted strings or
                ".*?(?<!\\)" |    # double quoted strings or
                \w+          |    # identifier
                \S                # other characters
                ''', re.VERBOSE | re.DOTALL)
        # 1. find the nearest identifier that comes before an unclosed
        # parenthesis before the cursor
        # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
        tokens = regexp.findall(self.text_until_cursor)
        tokens.reverse()
        iterTokens = iter(tokens); openPar = 0

        for token in iterTokens:
            if token == ')':
                openPar -= 1
            elif token == '(':
                openPar += 1
                if openPar > 0:
                    # found the last unclosed parenthesis
                    break
        else:
            return []
        # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
        ids = []
        isId = re.compile(r'\w+$').match

        while True:
            try:
                ids.append(next(iterTokens))
                if not isId(ids[-1]):
                    ids.pop(); break
                if not next(iterTokens) == '.':
                    break
            except StopIteration:
                break
        # lookup the candidate callable matches either using global_matches
        # or attr_matches for dotted names
        if len(ids) == 1:
            callableMatches = self.global_matches(ids[0])
        else:
            callableMatches = self.attr_matches('.'.join(ids[::-1]))
        argMatches = []
        for callableMatch in callableMatches:
            try:
                namedArgs = self._default_arguments(eval(callableMatch,
                                                         self.namespace))
            except:
                continue

            for namedArg in namedArgs:
                if namedArg.startswith(text):
                    argMatches.append(u"%s=" % namedArg)
        return argMatches

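    # Illustrative sketch (not part of the original source): given an IPCompleter
    # instance `ipc` (hypothetical name) whose namespace contains
    # `def foo(bar=1, baz=2): ...`, the matcher above would behave roughly like:
    #
    #     >>> ipc.line_buffer = 'foo(1, ba'          # normally set by complete()
    #     >>> ipc.text_until_cursor = ipc.line_buffer
    #     >>> sorted(ipc.python_func_kw_matches('ba'))
    #     ['bar=', 'baz=']
    #
    # The returned strings end with '=' so that accepting a match leaves the
    # cursor ready for the argument value.
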
    def dict_key_matches(self, text):
        "Match string keys in a dictionary, after e.g. 'foo[' "
        def get_keys(obj):
            # Objects can define their own completions by defining an
            # _ipython_key_completions_() method.
            method = get_real_method(obj, '_ipython_key_completions_')
            if method is not None:
                return method()

            # Special case some common in-memory dict-like types
            if isinstance(obj, dict) or\
               _safe_isinstance(obj, 'pandas', 'DataFrame'):
                try:
                    return list(obj.keys())
                except Exception:
                    return []
            elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
                 _safe_isinstance(obj, 'numpy', 'void'):
                return obj.dtype.names or []
            return []

        try:
            regexps = self.__dict_key_regexps
        except AttributeError:
            dict_key_re_fmt = r'''(?x)
            (  # match dict-referring expression wrt greedy setting
                %s
            )
            \[   # open bracket
            \s*  # and optional whitespace
            ([uUbB]?  # string prefix (r not handled)
                (?:   # unclosed string
                    '(?:[^']|(?<!\\)\\')*
                |
                    "(?:[^"]|(?<!\\)\\")*
                )
            )?
            $
            '''
            regexps = self.__dict_key_regexps = {
                False: re.compile(dict_key_re_fmt % '''
                                  # identifiers separated by .
                                  (?!\d)\w+
                                  (?:\.(?!\d)\w+)*
                                  '''),
                True: re.compile(dict_key_re_fmt % '''
                                  .+
                                  ''')
            }

        match = regexps[self.greedy].search(self.text_until_cursor)
        if match is None:
            return []

        expr, prefix = match.groups()
        try:
            obj = eval(expr, self.namespace)
        except Exception:
            try:
                obj = eval(expr, self.global_namespace)
            except Exception:
                return []

        keys = get_keys(obj)
        if not keys:
            return keys
        closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
        if not matches:
            return matches

        # get the cursor position of
        # - the text being completed
        # - the start of the key text
        # - the start of the completion
        text_start = len(self.text_until_cursor) - len(text)
        if prefix:
            key_start = match.start(2)
            completion_start = key_start + token_offset
        else:
            key_start = completion_start = match.end()

        # grab the leading prefix, to make sure all completions start with `text`
        if text_start > key_start:
            leading = ''
        else:
            leading = text[text_start:completion_start]

        # the index of the `[` character
        bracket_idx = match.end(1)

        # append closing quote and bracket as appropriate
        # this is *not* appropriate if the opening quote or bracket is outside
        # the text given to this method
        suf = ''
        continuation = self.line_buffer[len(self.text_until_cursor):]
        if key_start > text_start and closing_quote:
            # quotes were opened inside text, maybe close them
            if continuation.startswith(closing_quote):
                continuation = continuation[len(closing_quote):]
            else:
                suf += closing_quote
        if bracket_idx > text_start:
            # brackets were opened inside text, maybe close them
            if not continuation.startswith(']'):
                suf += ']'

        return [leading + k + suf for k in matches]

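    # Illustrative sketch (not part of the original source): any object can opt in
    # to key completion by defining _ipython_key_completions_(), which get_keys()
    # above calls via get_real_method. A minimal hypothetical example:
    #
    #     class Config(object):
    #         def __init__(self):
    #             self._data = {'host': 'localhost', 'port': 8080}
    #         def __getitem__(self, key):
    #             return self._data[key]
    #         def _ipython_key_completions_(self):
    #             # whatever is returned here is offered after "cfg["
    #             return list(self._data)
    #
    # Plain dicts, pandas DataFrames and numpy structured arrays are special-cased
    # above without needing this hook.
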
    def unicode_name_matches(self, text):
        u"""Match Latex-like syntax for unicode characters based
        on the name of the character.

        This does \\GREEK SMALL LETTER ETA -> η

        Works only on valid Python 3 identifiers, or on combining characters
        that will combine to form a valid identifier.

        Used on Python 3 only.
        """
        slashpos = text.rfind('\\')
        if slashpos > -1:
            s = text[slashpos+1:]
            try:
                unic = unicodedata.lookup(s)
                # allow combining chars
                if ('a'+unic).isidentifier():
                    return '\\'+s, [unic]
            except KeyError:
                pass
        return u'', []

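    # Illustrative sketch (not part of the original source): the method returns the
    # matched fragment plus its completions, e.g. (assuming Python 3 and an
    # IPCompleter instance `ipc`):
    #
    #     >>> ipc.unicode_name_matches('\\GREEK SMALL LETTER ETA')
    #     ('\\GREEK SMALL LETTER ETA', ['η'])
    #     >>> ipc.unicode_name_matches('no backslash here')
    #     ('', [])
    #
    # unicodedata.lookup() raises KeyError for unknown names, which is why the
    # lookup above is wrapped in try/except.
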
    def latex_matches(self, text):
        u"""Match Latex syntax for unicode characters.

        This does both \\alp -> \\alpha and \\alpha -> α

        Used on Python 3 only.
        """
        slashpos = text.rfind('\\')
        if slashpos > -1:
            s = text[slashpos:]
            if s in latex_symbols:
                # Try to complete a full latex symbol to unicode
                # \\alpha -> α
                return s, [latex_symbols[s]]
            else:
                # If a user has partially typed a latex symbol, give them
                # a full list of options \al -> [\aleph, \alpha]
                matches = [k for k in latex_symbols if k.startswith(s)]
                return s, matches
        return u'', []

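    # Illustrative sketch (not part of the original source): latex_matches() either
    # expands a complete symbol or lists the candidates for a partial one:
    #
    #     >>> ipc.latex_matches(r'\alpha')
    #     ('\\alpha', ['α'])
    #     >>> text, matches = ipc.latex_matches(r'\alp')
    #     >>> '\\alpha' in matches
    #     True
    #
    # The keys of latex_symbols include the leading backslash, so the slice here
    # starts at slashpos rather than slashpos+1 as in unicode_name_matches().
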
    def dispatch_custom_completer(self, text):
        if not self.custom_completers:
            return

        line = self.line_buffer
        if not line.strip():
            return None

        # Create a little structure to pass all the relevant information about
        # the current completion to any custom completer.
        event = Bunch()
        event.line = line
        event.symbol = text
        cmd = line.split(None, 1)[0]
        event.command = cmd
        event.text_until_cursor = self.text_until_cursor

        # for foo etc, try also to find completer for %foo
        if not cmd.startswith(self.magic_escape):
            try_magic = self.custom_completers.s_matches(
                self.magic_escape + cmd)
        else:
            try_magic = []

        for c in itertools.chain(self.custom_completers.s_matches(cmd),
                                 try_magic,
                                 self.custom_completers.flat_matches(self.text_until_cursor)):
            try:
                res = c(event)
                if res:
                    # first, try case sensitive match
                    withcase = [cast_unicode_py2(r) for r in res if r.startswith(text)]
                    if withcase:
                        return withcase
                    # if none, then case insensitive ones are ok too
                    text_low = text.lower()
                    return [cast_unicode_py2(r) for r in res if r.lower().startswith(text_low)]
            except TryNext:
                pass

        return None

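    # Illustrative sketch (not part of the original source): as invoked here, a
    # custom completer is a callable receiving the Bunch built above (event.line,
    # event.symbol, event.command, event.text_until_cursor) and returning a list
    # of strings, or raising TryNext to pass control on. A hypothetical example:
    #
    #     def apt_completer(event):
    #         # `event` carries line, symbol, command and text_until_cursor
    #         if not event.line.startswith('apt'):
    #             raise TryNext          # let the next completer handle it
    #         return ['install', 'remove', 'update', 'upgrade']
    #
    # How such completers are registered with self.custom_completers (e.g. via
    # IPython's command-completion hooks) is outside the scope of this method.
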
    @_strip_single_trailing_space
    def complete(self, text=None, line_buffer=None, cursor_pos=None):
        """Find completions for the given text and line context.

        Note that both the text and the line_buffer are optional, but at least
        one of them must be given.

        Parameters
        ----------
        text : string, optional
            Text to perform the completion on.  If not given, the line buffer
            is split using the instance's CompletionSplitter object.

        line_buffer : string, optional
            If not given, the completer attempts to obtain the current line
            buffer via readline.  This keyword allows clients requesting text
            completions in non-readline contexts to inform the completer of
            the entire text.

        cursor_pos : int, optional
            Index of the cursor in the full line buffer.  Should be provided by
            remote frontends, where the kernel has no access to frontend state.

        Returns
        -------
        text : str
            Text that was actually used in the completion.

        matches : list
            A list of completion matches.
        """
        # if the cursor position isn't given, the only sane assumption we can
        # make is that it's at the end of the line (the common case)
        if cursor_pos is None:
            cursor_pos = len(line_buffer) if text is None else len(text)

        if PY3:

            base_text = text if not line_buffer else line_buffer[:cursor_pos]
            latex_text, latex_matches = self.latex_matches(base_text)
            if latex_matches:
                return latex_text, latex_matches
            name_text = ''
            name_matches = []
            for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
                name_text, name_matches = meth(base_text)
                if name_text:
                    return name_text, name_matches

        # if text is either None or an empty string, rely on the line buffer
        if not text:
            text = self.splitter.split_line(line_buffer, cursor_pos)

        # If no line buffer is given, assume the input text is all there was
        if line_buffer is None:
            line_buffer = text

        self.line_buffer = line_buffer
        self.text_until_cursor = self.line_buffer[:cursor_pos]

        # Start with a clean slate of completions
        self.matches[:] = []
        custom_res = self.dispatch_custom_completer(text)
        if custom_res is not None:
            # did custom completers produce something?
            self.matches = custom_res
        else:
            # Extend the list of completions with the results of each
            # matcher, so we return results to the user from all
            # namespaces.
            if self.merge_completions:
                self.matches = []
                for matcher in self.matchers:
                    try:
                        self.matches.extend(matcher(text))
                    except:
                        # Show the ugly traceback if the matcher causes an
                        # exception, but do NOT crash the kernel!
                        sys.excepthook(*sys.exc_info())
            else:
                for matcher in self.matchers:
                    self.matches = matcher(text)
                    if self.matches:
                        break
        # FIXME: we should extend our api to return a dict with completions for
        # different types of objects.  The rlcomplete() method could then
        # simply collapse the dict into a list for readline, but we'd have
        # richer completion semantics in other environments.
        self.matches = sorted(set(self.matches), key=completions_sorting_key)

        return text, self.matches
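
    # Illustrative sketch (not part of the original source): typical use from a
    # frontend, assuming `ipc` is an IPCompleter wired to the user namespace:
    #
    #     >>> text, matches = ipc.complete(line_buffer='ran', cursor_pos=3)
    #     >>> text
    #     'ran'
    #     >>> 'range' in matches
    #     True
    #
    # Only line_buffer and cursor_pos need to be supplied; `text` is recovered by
    # the CompletionSplitter, and the method returns the fragment it actually
    # completed together with the sorted, de-duplicated matches.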