Fix completions for PTK 1.0
Matthias Bussonnier
@@ -1,1332 +1,1335 b''
1 1 # encoding: utf-8
2 2 """Word completion for IPython.
3 3
4 4 This module is a fork of the rlcompleter module in the Python standard
5 5 library. The original enhancements made to rlcompleter have been sent
6 6 upstream and were accepted as of Python 2.3, but we need a lot more
7 7 functionality specific to IPython, so this module will continue to live as an
8 8 IPython-specific utility.
9 9
10 10 Original rlcompleter documentation:
11 11
12 12 This requires the latest extension to the readline module. The
13 13 completer completes keywords, built-ins and globals in __main__; when completing
14 14 NAME.NAME..., it evaluates (!) the expression up to the last dot and
15 15 completes its attributes.
16 16
17 17 It's very cool to do "import string" type "string.", hit the
18 18 completion key (twice), and see the list of names defined by the
19 19 string module!
20 20
21 21 Tip: to use the tab key as the completion key, call
22 22
23 23 readline.parse_and_bind("tab: complete")
24 24
25 25 Notes:
26 26
27 27 - Exceptions raised by the completer function are *ignored* (and
28 28 generally cause the completion to fail). This is a feature -- since
29 29 readline sets the tty device in raw (or cbreak) mode, printing a
30 30 traceback wouldn't work well without some complicated hoopla to save,
31 31 reset and restore the tty state.
32 32
33 33 - The evaluation of the NAME.NAME... form may cause arbitrary
34 34 application defined code to be executed if an object with a
35 35 ``__getattr__`` hook is found. Since it is the responsibility of the
36 36 application (or the user) to enable this feature, I consider this an
37 37 acceptable risk. More complicated expressions (e.g. function calls or
38 38 indexing operations) are *not* evaluated.
39 39
40 40 - GNU readline is also used by the built-in functions input() and
41 41 raw_input(), and thus these also benefit/suffer from the completer
42 42 features. Clearly an interactive application can benefit by
43 43 specifying its own completer function and using raw_input() for all
44 44 its input.
45 45
46 46 - When the original stdin is not a tty device, GNU readline is never
47 47 used, and this module (and the readline module) are silently inactive.
48 48 """
49 49
50 50 # Copyright (c) IPython Development Team.
51 51 # Distributed under the terms of the Modified BSD License.
52 52 #
53 53 # Some of this code originated from rlcompleter in the Python standard library
54 54 # Copyright (C) 2001 Python Software Foundation, www.python.org
55 55
56 56 from __future__ import print_function
57 57
58 58 import __main__
59 59 import glob
60 60 import inspect
61 61 import itertools
62 62 import keyword
63 63 import os
64 64 import re
65 65 import sys
66 66 import unicodedata
67 67 import string
68 68
69 69 from traitlets.config.configurable import Configurable
70 70 from IPython.core.error import TryNext
71 71 from IPython.core.inputsplitter import ESC_MAGIC
72 72 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
73 73 from IPython.utils import generics
74 74 from IPython.utils.decorators import undoc
75 75 from IPython.utils.dir2 import dir2, get_real_method
76 76 from IPython.utils.process import arg_split
77 77 from IPython.utils.py3compat import builtin_mod, string_types, PY3, cast_unicode_py2
78 78 from traitlets import CBool, Enum
79 79
80 80 try:
81 81 import jedi
82 82 import jedi.api.helpers
83 83 import jedi.parser.user_context
84 84 JEDI_INSTALLED = True
85 85 except ImportError:
86 86 JEDI_INSTALLED = False
87 87
88 88 #-----------------------------------------------------------------------------
89 89 # Globals
90 90 #-----------------------------------------------------------------------------
91 91
92 92 # Public API
93 93 __all__ = ['Completer','IPCompleter']
94 94
95 95 if sys.platform == 'win32':
96 96 PROTECTABLES = ' '
97 97 else:
98 98 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
99 99
100 100
101 101 #-----------------------------------------------------------------------------
102 102 # Main functions and classes
103 103 #-----------------------------------------------------------------------------
104 104
105 105 def has_open_quotes(s):
106 106 """Return whether a string has open quotes.
107 107
108 108 This simply checks whether the number of quote characters of either type in
109 109 the string is odd.
110 110
111 111 Returns
112 112 -------
113 113 If there is an open quote, the quote character is returned. Else, return
114 114 False.
115 115 """
116 116 # We check " first, then ', so complex cases with nested quotes will get
117 117 # the " to take precedence.
118 118 if s.count('"') % 2:
119 119 return '"'
120 120 elif s.count("'") % 2:
121 121 return "'"
122 122 else:
123 123 return False
124 124
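# A minimal usage sketch of has_open_quotes(), assuming this module is
# importable as IPython.core.completer (as the test suite below does):
from IPython.core.completer import has_open_quotes

assert has_open_quotes("print('abc") == "'"   # one unmatched single quote
assert has_open_quotes('open("fi') == '"'     # one unmatched double quote
assert has_open_quotes("'done'") is False     # all quotes balanced
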
125 125
126 126 def protect_filename(s):
127 127 """Escape a string to protect certain characters."""
128 128
129 129 return "".join([(ch in PROTECTABLES and '\\' + ch or ch)
130 130 for ch in s])
131 131
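# A short sketch of protect_filename(): characters listed in PROTECTABLES are
# backslash-escaped so readline treats the whole filename as a single token.
from IPython.core.completer import protect_filename

assert protect_filename('my file.txt') == r'my\ file.txt'
# On POSIX, shell-special characters are escaped too, e.g. 'a(b)c' -> r'a\(b\)c'.
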
132 132 def expand_user(path):
133 133 """Expand '~'-style usernames in strings.
134 134
135 135 This is similar to :func:`os.path.expanduser`, but it computes and returns
136 136 extra information that will be useful if the input was being used in
137 137 computing completions, and you wish to return the completions with the
138 138 original '~' instead of its expanded value.
139 139
140 140 Parameters
141 141 ----------
142 142 path : str
143 143 String to be expanded. If no ~ is present, the output is the same as the
144 144 input.
145 145
146 146 Returns
147 147 -------
148 148 newpath : str
149 149 Result of ~ expansion in the input path.
150 150 tilde_expand : bool
151 151 Whether any expansion was performed or not.
152 152 tilde_val : str
153 153 The value that ~ was replaced with.
154 154 """
155 155 # Default values
156 156 tilde_expand = False
157 157 tilde_val = ''
158 158 newpath = path
159 159
160 160 if path.startswith('~'):
161 161 tilde_expand = True
162 162 rest = len(path)-1
163 163 newpath = os.path.expanduser(path)
164 164 if rest:
165 165 tilde_val = newpath[:-rest]
166 166 else:
167 167 tilde_val = newpath
168 168
169 169 return newpath, tilde_expand, tilde_val
170 170
171 171
172 172 def compress_user(path, tilde_expand, tilde_val):
173 173 """Does the opposite of expand_user, with its outputs.
174 174 """
175 175 if tilde_expand:
176 176 return path.replace(tilde_val, '~')
177 177 else:
178 178 return path
179 179
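# Sketch of the expand_user()/compress_user() round trip used for '~' paths;
# the concrete home directory is of course machine dependent.
import os
from IPython.core.completer import expand_user, compress_user

newpath, expanded, tilde_val = expand_user('~/work')
assert newpath == os.path.expanduser('~/work')        # e.g. '/home/user/work'
assert expanded is True and newpath.endswith('/work')
# compress_user() restores the '~' form so completions are shown as typed:
assert compress_user(newpath, expanded, tilde_val) == '~/work'
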
180 180
181 181
182 182 def completions_sorting_key(word):
183 183 """key for sorting completions
184 184
185 185 This does several things:
186 186
187 187 - Lowercase all completions, so they are sorted alphabetically with
188 188 upper and lower case words mingled
189 189 - Demote any completions starting with underscores to the end
190 190 - Insert any %magic and %%cellmagic completions in the alphabetical order
191 191 by their name
192 192 """
193 193 # Case insensitive sort
194 194 word = word.lower()
195 195
196 196 prio1, prio2 = 0, 0
197 197
198 198 if word.startswith('__'):
199 199 prio1 = 2
200 200 elif word.startswith('_'):
201 201 prio1 = 1
202 202
203 203 if word.endswith('='):
204 204 prio1 = -1
205 205
206 206 if word.startswith('%%'):
207 207 # If there's another % in there, this is something else, so leave it alone
208 208 if not "%" in word[2:]:
209 209 word = word[2:]
210 210 prio2 = 2
211 211 elif word.startswith('%'):
212 212 if not "%" in word[1:]:
213 213 word = word[1:]
214 214 prio2 = 1
215 215
216 216 return prio1, word, prio2
217 217
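# Sketch of the resulting ordering: regular names first (case-insensitively),
# then single-underscore names, then dunder names; magics are slotted in
# alphabetically by their bare name.
from IPython.core.completer import completions_sorting_key

words = ['__repr__', '_private', 'zeta', '%alias', 'alpha']
assert sorted(words, key=completions_sorting_key) == \
    ['%alias', 'alpha', 'zeta', '_private', '__repr__']
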
218 218
219 219 @undoc
220 220 class Bunch(object): pass
221 221
222 222
223 223 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
224 224 GREEDY_DELIMS = ' =\r\n'
225 225
226 226
227 227 class CompletionSplitter(object):
228 228 """An object to split an input line in a manner similar to readline.
229 229
230 230 By having our own implementation, we can expose readline-like completion in
231 231 a uniform manner to all frontends. This object only needs to be given the
232 232 line of text to be split and the cursor position on said line, and it
233 233 returns the 'word' to be completed on at the cursor after splitting the
234 234 entire line.
235 235
236 236 What characters are used as splitting delimiters can be controlled by
237 237 setting the `delims` attribute (this is a property that internally
238 238 automatically builds the necessary regular expression)"""
239 239
240 240 # Private interface
241 241
242 242 # A string of delimiter characters. The default value makes sense for
243 243 # IPython's most typical usage patterns.
244 244 _delims = DELIMS
245 245
246 246 # The expression (a normal string) to be compiled into a regular expression
247 247 # for actual splitting. We store it as an attribute mostly for ease of
248 248 # debugging, since this type of code can be so tricky to debug.
249 249 _delim_expr = None
250 250
251 251 # The regular expression that does the actual splitting
252 252 _delim_re = None
253 253
254 254 def __init__(self, delims=None):
255 255 delims = CompletionSplitter._delims if delims is None else delims
256 256 self.delims = delims
257 257
258 258 @property
259 259 def delims(self):
260 260 """Return the string of delimiter characters."""
261 261 return self._delims
262 262
263 263 @delims.setter
264 264 def delims(self, delims):
265 265 """Set the delimiters for line splitting."""
266 266 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
267 267 self._delim_re = re.compile(expr)
268 268 self._delims = delims
269 269 self._delim_expr = expr
270 270
271 271 def split_line(self, line, cursor_pos=None):
272 272 """Split a line of text with a cursor at the given position.
273 273 """
274 274 l = line if cursor_pos is None else line[:cursor_pos]
275 275 return self._delim_re.split(l)[-1]
276 276
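# Sketch of the splitter in isolation: it returns the token that sits under
# the cursor, using readline-like delimiters (compare the test specs below).
from IPython.core.completer import CompletionSplitter

sp = CompletionSplitter()
assert sp.split_line('run scripts/er') == 'scripts/er'
assert sp.split_line('print(sys.pa') == 'sys.pa'
assert sp.split_line('a[x.', cursor_pos=4) == 'x.'
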
277 277
278 278 class Completer(Configurable):
279 279
280 280 greedy = CBool(False, config=True,
281 281 help="""Activate greedy completion
282 282 PENDING DEPRECATION. This is now mostly taken care of with Jedi.
283 283
284 284 This will enable completion on elements of lists, results of function calls, etc.,
285 285 but can be unsafe because the code is actually evaluated on TAB.
286 286 """
287 287 )
288 288
289 289
290 290 def __init__(self, namespace=None, global_namespace=None, **kwargs):
291 291 """Create a new completer for the command line.
292 292
293 293 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
294 294
295 295 If unspecified, the default namespace where completions are performed
296 296 is __main__ (technically, __main__.__dict__). Namespaces should be
297 297 given as dictionaries.
298 298
299 299 An optional second namespace can be given. This allows the completer
300 300 to handle cases where both the local and global scopes need to be
301 301 distinguished.
302 302
303 303 Completer instances should be used as the completion mechanism of
304 304 readline via the set_completer() call:
305 305
306 306 readline.set_completer(Completer(my_namespace).complete)
307 307 """
308 308
309 309 # Don't bind to namespace quite yet, but flag whether the user wants a
310 310 # specific namespace or to use __main__.__dict__. This will allow us
311 311 # to bind to __main__.__dict__ at completion time, not now.
312 312 if namespace is None:
313 313 self.use_main_ns = 1
314 314 else:
315 315 self.use_main_ns = 0
316 316 self.namespace = namespace
317 317
318 318 # The global namespace, if given, can be bound directly
319 319 if global_namespace is None:
320 320 self.global_namespace = {}
321 321 else:
322 322 self.global_namespace = global_namespace
323 323
324 324 super(Completer, self).__init__(**kwargs)
325 325
326 326 def complete(self, text, state):
327 327 """Return the next possible completion for 'text'.
328 328
329 329 This is called successively with state == 0, 1, 2, ... until it
330 330 returns None. The completion should begin with 'text'.
331 331
332 332 """
333 333 if self.use_main_ns:
334 334 self.namespace = __main__.__dict__
335 335
336 336 if state == 0:
337 337 if "." in text:
338 338 self.matches = self.attr_matches(text)
339 339 else:
340 340 self.matches = self.global_matches(text)
341 341 try:
342 342 return self.matches[state]
343 343 except IndexError:
344 344 return None
345 345
346 346 def global_matches(self, text):
347 347 """Compute matches when text is a simple name.
348 348
349 349 Return a list of all keywords, built-in functions and names currently
350 350 defined in self.namespace or self.global_namespace that match.
351 351
352 352 """
353 353 matches = []
354 354 match_append = matches.append
355 355 n = len(text)
356 356 for lst in [keyword.kwlist,
357 357 builtin_mod.__dict__.keys(),
358 358 self.namespace.keys(),
359 359 self.global_namespace.keys()]:
360 360 for word in lst:
361 361 if word[:n] == text and word != "__builtins__":
362 362 match_append(word)
363 363 return [cast_unicode_py2(m) for m in matches]
364 364
365 365 def attr_matches(self, text):
366 366 """Compute matches when text contains a dot.
367 367
368 368 Assuming the text is of the form NAME.NAME....[NAME], and is
369 369 evaluatable in self.namespace or self.global_namespace, it will be
370 370 evaluated and its attributes (as revealed by dir()) are used as
371 371 possible completions. (For class instances, class members are
372 372 also considered.)
373 373
374 374 WARNING: this can still invoke arbitrary C code, if an object
375 375 with a __getattr__ hook is evaluated.
376 376
377 377 """
378 378
379 379 # Another option, seems to work great. Catches things like ''.<tab>
380 380 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
381 381
382 382 if m:
383 383 expr, attr = m.group(1, 3)
384 384 elif self.greedy:
385 385 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
386 386 if not m2:
387 387 return []
388 388 expr, attr = m2.group(1,2)
389 389 else:
390 390 return []
391 391
392 392 try:
393 393 obj = eval(expr, self.namespace)
394 394 except:
395 395 try:
396 396 obj = eval(expr, self.global_namespace)
397 397 except:
398 398 return []
399 399
400 400 if self.limit_to__all__ and hasattr(obj, '__all__'):
401 401 words = get__all__entries(obj)
402 402 else:
403 403 words = dir2(obj)
404 404
405 405 try:
406 406 words = generics.complete_object(obj, words)
407 407 except TryNext:
408 408 pass
409 409 except Exception:
410 410 # Silence errors from completion function
411 411 #raise # dbg
412 412 pass
413 413 # Build match list to return
414 414 n = len(attr)
415 415 return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr ]
416 416
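# Sketch of the base Completer driven the way readline drives it: `state` is
# bumped until None comes back. The namespace dict here is purely illustrative.
from IPython.core.completer import Completer

ns = {'alpha': 1, 'alphabet': 'abc'}
c = Completer(namespace=ns)
found, state = [], 0
while True:
    m = c.complete('alph', state)
    if m is None:
        break
    found.append(m)
    state += 1
assert set(found) == {'alpha', 'alphabet'}
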
417 417
418 418 def get__all__entries(obj):
419 419 """returns the strings in the __all__ attribute"""
420 420 try:
421 421 words = getattr(obj, '__all__')
422 422 except:
423 423 return []
424 424
425 425 return [cast_unicode_py2(w) for w in words if isinstance(w, string_types)]
426 426
427 427
428 428 def match_dict_keys(keys, prefix, delims):
429 429 """Used by dict_key_matches, matching the prefix to a list of keys"""
430 430 if not prefix:
431 431 return None, 0, [repr(k) for k in keys
432 432 if isinstance(k, (string_types, bytes))]
433 433 quote_match = re.search('["\']', prefix)
434 434 quote = quote_match.group()
435 435 try:
436 436 prefix_str = eval(prefix + quote, {})
437 437 except Exception:
438 438 return None, 0, []
439 439
440 440 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
441 441 token_match = re.search(pattern, prefix, re.UNICODE)
442 442 token_start = token_match.start()
443 443 token_prefix = token_match.group()
444 444
445 445 # TODO: support bytes in Py3k
446 446 matched = []
447 447 for key in keys:
448 448 try:
449 449 if not key.startswith(prefix_str):
450 450 continue
451 451 except (AttributeError, TypeError, UnicodeError):
452 452 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
453 453 continue
454 454
455 455 # reformat remainder of key to begin with prefix
456 456 rem = key[len(prefix_str):]
457 457 # force repr wrapped in '
458 458 rem_repr = repr(rem + '"')
459 459 if rem_repr.startswith('u') and prefix[0] not in 'uU':
460 460 # Found key is unicode, but prefix is Py2 string.
461 461 # Therefore attempt to interpret key as string.
462 462 try:
463 463 rem_repr = repr(rem.encode('ascii') + '"')
464 464 except UnicodeEncodeError:
465 465 continue
466 466
467 467 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
468 468 if quote == '"':
469 469 # The entered prefix is quoted with ",
470 470 # but the match is quoted with '.
471 471 # A contained " hence needs escaping for comparison:
472 472 rem_repr = rem_repr.replace('"', '\\"')
473 473
474 474 # then reinsert prefix from start of token
475 475 matched.append('%s%s' % (token_prefix, rem_repr))
476 476 return quote, token_start, matched
477 477
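# Sketch of match_dict_keys(): given the candidate keys and the prefix typed
# after the opening bracket, it returns the quote in use, the offset of the
# current token inside the prefix, and the completion texts for that token.
from IPython.core.completer import match_dict_keys, DELIMS

quote, offset, matches = match_dict_keys(['apple', 'apricot', 'banana'], "'ap", DELIMS)
assert (quote, offset, matches) == ("'", 1, ['apple', 'apricot'])
# With no prefix at all, every string key is offered in repr() form:
assert match_dict_keys(['apple'], '', DELIMS)[2] == ["'apple'"]
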
478 478
479 479 def _safe_isinstance(obj, module, class_name):
480 480 """Checks if obj is an instance of module.class_name if loaded
481 481 """
482 482 return (module in sys.modules and
483 483 isinstance(obj, getattr(__import__(module), class_name)))
484 484
485 485
486 486 def back_unicode_name_matches(text):
487 487 u"""Match unicode characters back to unicode name
488 488
489 489 This does β˜ƒ -> \\snowman
490 490
491 491 Note that snowman is not a valid python3 combining character, but it will still be expanded.
492 492 It will not, however, be recombined back into the snowman character by the completion machinery.
493 493
494 494 Nor will this back-complete standard escape sequences like \\n, \\b ...
495 495
496 496 Used on Python 3 only.
497 497 """
498 498 if len(text)<2:
499 499 return u'', ()
500 500 maybe_slash = text[-2]
501 501 if maybe_slash != '\\':
502 502 return u'', ()
503 503
504 504 char = text[-1]
505 505 # no expand on quote for completion in strings.
506 506 # nor backcomplete standard ascii keys
507 507 if char in string.ascii_letters or char in ['"',"'"]:
508 508 return u'', ()
509 509 try :
510 510 unic = unicodedata.name(char)
511 511 return '\\'+char,['\\'+unic]
512 512 except KeyError as e:
513 513 pass
514 514 return u'', ()
515 515
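# Sketch (Python 3): back-completion from a literal character to its unicode
# name, so a snowman glyph typed after a backslash is offered as '\SNOWMAN'.
from IPython.core.completer import back_unicode_name_matches

assert back_unicode_name_matches(u'print("\\β˜ƒ') == ('\\β˜ƒ', ['\\SNOWMAN'])
assert back_unicode_name_matches(u'\\n') == (u'', ())   # ascii escapes are left alone
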
516 516 def back_latex_name_matches(text):
517 517 u"""Match latex characters back to unicode name
518 518
519 519 This does √ -> \\sqrt
520 520
521 521 Used on Python 3 only.
522 522 """
523 523 if len(text)<2:
524 524 return u'', ()
525 525 maybe_slash = text[-2]
526 526 if maybe_slash != '\\':
527 527 return u'', ()
528 528
529 529
530 530 char = text[-1]
531 531 # no expand on quote for completion in strings.
532 532 # nor backcomplete standard ascii keys
533 533 if char in string.ascii_letters or char in ['"',"'"]:
534 534 return u'', ()
535 535 try :
536 536 latex = reverse_latex_symbol[char]
537 537 # '\\' replace the \ as well
538 538 return '\\'+char,[latex]
539 539 except KeyError as e:
540 540 pass
541 541 return u'', ()
542 542
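# Sketch (Python 3): the latex counterpart maps a glyph back to its latex
# escape, e.g. Ξ² typed after a backslash is offered as '\beta'.
from IPython.core.completer import back_latex_name_matches

assert back_latex_name_matches(u'x = \\Ξ²') == ('\\Ξ²', ['\\beta'])
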
543 543
544 544 class IPCompleter(Completer):
545 545 """Extension of the completer class with IPython-specific features"""
546 546
547 547 def _greedy_changed(self, name, old, new):
548 548 """update the splitter and readline delims when greedy is changed"""
549 549 if new:
550 550 self.splitter.delims = GREEDY_DELIMS
551 551 else:
552 552 self.splitter.delims = DELIMS
553 553
554 554 if self.readline:
555 555 self.readline.set_completer_delims(self.splitter.delims)
556 556
557 557 merge_completions = CBool(True, config=True,
558 558 help="""Whether to merge completion results into a single list
559 559
560 560 If False, only the completion results from the first non-empty
561 561 completer will be returned.
562 562 """
563 563 )
564 564 omit__names = Enum((0,1,2), default_value=2, config=True,
565 565 help="""Instruct the completer to omit private method names
566 566
567 567 Specifically, when completing on ``object.<tab>``.
568 568
569 569 When 2 [default]: all names that start with '_' will be excluded.
570 570
571 571 When 1: all 'magic' names (``__foo__``) will be excluded.
572 572
573 573 When 0: nothing will be excluded.
574 574 """
575 575 )
576 576 limit_to__all__ = CBool(default_value=False, config=True,
577 577 help="""
578 578 DEPRECATED as of version 5.0.
579 579
580 580 Instruct the completer to use __all__ for the completion
581 581
582 582 Specifically, when completing on ``object.<tab>``.
583 583
584 584 When True: only those names in obj.__all__ will be included.
585 585
586 586 When False [default]: the __all__ attribute is ignored
587 587 """
588 588 )
589 589 use_jedi_completions = CBool(default_value=JEDI_INSTALLED, config=True,
590 590 help="""Use Jedi to generate autocompletions.
591 591 """)
592 592
593 593 def __init__(self, shell=None, namespace=None, global_namespace=None,
594 594 use_readline=True, config=None, **kwargs):
595 595 """IPCompleter() -> completer
596 596
597 597 Return a completer object suitable for use by the readline library
598 598 via readline.set_completer().
599 599
600 600 Inputs:
601 601
602 602 - shell: a pointer to the ipython shell itself. This is needed
603 603 because this completer knows about magic functions, and those can
604 604 only be accessed via the ipython instance.
605 605
606 606 - namespace: an optional dict where completions are performed.
607 607
608 608 - global_namespace: secondary optional dict for completions, to
609 609 handle cases (such as IPython embedded inside functions) where
610 610 both Python scopes are visible.
611 611
612 612 use_readline : bool, optional
613 613 If true, use the readline library. This completer can still function
614 614 without readline, though in that case callers must provide some extra
615 615 information on each call about the current line."""
616 616
617 617 self.magic_escape = ESC_MAGIC
618 618 self.splitter = CompletionSplitter()
619 619
620 620 # Readline configuration, only used by the rlcompleter method.
621 621 if use_readline:
622 622 # We store the right version of readline so that later code
623 623 import IPython.utils.rlineimpl as readline
624 624 self.readline = readline
625 625 else:
626 626 self.readline = None
627 627
628 628 # _greedy_changed() depends on splitter and readline being defined:
629 629 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
630 630 config=config, **kwargs)
631 631
632 632 # List where completion matches will be stored
633 633 self.matches = []
634 634 self.shell = shell
635 635 # Regexp to split filenames with spaces in them
636 636 self.space_name_re = re.compile(r'([^\\] )')
637 637 # Hold a local ref. to glob.glob for speed
638 638 self.glob = glob.glob
639 639
640 640 # Determine if we are running on 'dumb' terminals, like (X)Emacs
641 641 # buffers, to avoid completion problems.
642 642 term = os.environ.get('TERM','xterm')
643 643 self.dumb_terminal = term in ['dumb','emacs']
644 644
645 645 # Special handling of backslashes needed in win32 platforms
646 646 if sys.platform == "win32":
647 647 self.clean_glob = self._clean_glob_win32
648 648 else:
649 649 self.clean_glob = self._clean_glob
650 650
651 651 #regexp to parse docstring for function signature
652 652 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
653 653 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
654 654 #use this if positional argument name is also needed
655 655 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
656 656
657 657 # All active matcher routines for completion
658 658 self.matchers = [
659 659 self.file_matches,
660 660 self.magic_matches,
661 661 self.python_func_kw_matches,
662 662 self.dict_key_matches,
663 663 ]
664 664
665 665 def all_completions(self, text):
666 666 """
667 667 Wrapper around the complete method for the benefit of emacs
668 668 and pydb.
669 669 """
670 670 return self.complete(text)[1]
671 671
672 672 def _clean_glob(self, text):
673 673 return self.glob("%s*" % text)
674 674
675 675 def _clean_glob_win32(self,text):
676 676 return [f.replace("\\","/")
677 677 for f in self.glob("%s*" % text)]
678 678
679 679 def file_matches(self, text):
680 680 """Match filenames, expanding ~USER type strings.
681 681
682 682 Most of the seemingly convoluted logic in this completer is an
683 683 attempt to handle filenames with spaces in them. And yet it's not
684 684 quite perfect, because Python's readline doesn't expose all of the
685 685 GNU readline details needed for this to be done correctly.
686 686
687 687 For a filename with a space in it, the printed completions will be
688 688 only the parts after what's already been typed (instead of the
689 689 full completions, as is normally done). I don't think with the
690 690 current (as of Python 2.3) Python readline it's possible to do
691 691 better."""
692 692
693 693 # chars that require escaping with backslash - i.e. chars
694 694 # that readline treats incorrectly as delimiters, but we
695 695 # don't want to treat as delimiters in filename matching
696 696 # when escaped with backslash
697 697 if text.startswith('!'):
698 698 text = text[1:]
699 699 text_prefix = u'!'
700 700 else:
701 701 text_prefix = u''
702 702
703 703 text_until_cursor = self.text_until_cursor
704 704 # track strings with open quotes
705 705 open_quotes = has_open_quotes(text_until_cursor)
706 706
707 707 if '(' in text_until_cursor or '[' in text_until_cursor:
708 708 lsplit = text
709 709 else:
710 710 try:
711 711 # arg_split ~ shlex.split, but with unicode bugs fixed by us
712 712 lsplit = arg_split(text_until_cursor)[-1]
713 713 except ValueError:
714 714 # typically an unmatched ", or backslash without escaped char.
715 715 if open_quotes:
716 716 lsplit = text_until_cursor.split(open_quotes)[-1]
717 717 else:
718 718 return []
719 719 except IndexError:
720 720 # tab pressed on empty line
721 721 lsplit = ""
722 722
723 723 if not open_quotes and lsplit != protect_filename(lsplit):
724 724 # if protectables are found, do matching on the whole escaped name
725 725 has_protectables = True
726 726 text0,text = text,lsplit
727 727 else:
728 728 has_protectables = False
729 729 text = os.path.expanduser(text)
730 730
731 731 if text == "":
732 732 return [text_prefix + cast_unicode_py2(protect_filename(f)) for f in self.glob("*")]
733 733
734 734 # Compute the matches from the filesystem
735 735 m0 = self.clean_glob(text.replace('\\',''))
736 736
737 737 if has_protectables:
738 738 # If we had protectables, we need to revert our changes to the
739 739 # beginning of filename so that we don't double-write the part
740 740 # of the filename we have so far
741 741 len_lsplit = len(lsplit)
742 742 matches = [text_prefix + text0 +
743 743 protect_filename(f[len_lsplit:]) for f in m0]
744 744 else:
745 745 if open_quotes:
746 746 # if we have a string with an open quote, we don't need to
747 747 # protect the names at all (and we _shouldn't_, as it
748 748 # would cause bugs when the filesystem call is made).
749 749 matches = m0
750 750 else:
751 751 matches = [text_prefix +
752 752 protect_filename(f) for f in m0]
753 753
754 754 # Mark directories in input list by appending '/' to their names.
755 755 return [cast_unicode_py2(x+'/') if os.path.isdir(x) else x for x in matches]
756 756
757 757 def magic_matches(self, text):
758 758 """Match magics"""
759 759 # Get all shell magics now rather than statically, so magics loaded at
760 760 # runtime show up too.
761 761 lsm = self.shell.magics_manager.lsmagic()
762 762 line_magics = lsm['line']
763 763 cell_magics = lsm['cell']
764 764 pre = self.magic_escape
765 765 pre2 = pre+pre
766 766
767 767 # Completion logic:
768 768 # - user gives %%: only do cell magics
769 769 # - user gives %: do both line and cell magics
770 770 # - no prefix: do both
771 771 # In other words, line magics are skipped if the user gives %% explicitly
772 772 bare_text = text.lstrip(pre)
773 773 comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
774 774 if not text.startswith(pre2):
775 775 comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
776 776 return [cast_unicode_py2(c) for c in comp]
777 777
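# Sketch, assuming a running IPython session (so get_ipython() returns the
# shell): a single '%' matches line and cell magics, '%%' only cell magics.
from IPython import get_ipython

ip = get_ipython()
print(ip.Completer.magic_matches('%ti'))    # e.g. ['%time', '%timeit', '%%time', '%%timeit', ...]
print(ip.Completer.magic_matches('%%ti'))   # cell magics only, e.g. ['%%time', '%%timeit', ...]
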
778 778 def python_jedi_matches(self, text, line_buffer, cursor_pos):
779 779 """Match attributes or global Python names using Jedi."""
780 780 if line_buffer.startswith('aimport ') or line_buffer.startswith('%aimport '):
781 781 return ()
782 782 namespaces = []
783 783 if self.namespace is None:
784 784 import __main__
785 785 namespaces.append(__main__.__dict__)
786 786 else:
787 787 namespaces.append(self.namespace)
788 788 if self.global_namespace is not None:
789 789 namespaces.append(self.global_namespace)
790 790
791 791 # cursor_pos is an int, jedi wants line and column
792 792
793 793 interpreter = jedi.Interpreter(line_buffer, namespaces, column=cursor_pos)
794 794 path = jedi.parser.user_context.UserContext(line_buffer, \
795 795 (1, len(line_buffer))).get_path_until_cursor()
796 796 path, dot, like = jedi.api.helpers.completion_parts(path)
797 797 if text.startswith('.'):
798 798 # text will be `.` on completions like `a[0].<tab>`
799 799 before = dot
800 800 else:
801 801 before = line_buffer[:len(line_buffer) - len(like)]
802 802
803 803
804 804 def trim_start(completion):
805 805 """completions need to start with `text`, trim the beginning until it does"""
806 if text in completion and not (completion.startswith(text)):
807 start_index = completion.index(text)
806 ltext = text.lower()
807 lcomp = completion.lower()
808 if ltext in lcomp and not (lcomp.startswith(ltext)):
809 start_index = lcomp.index(ltext)
808 810 if cursor_pos:
809 assert start_index < cursor_pos
811 if start_index >= cursor_pos:
812 start_index = min(start_index, cursor_pos)
810 813 return completion[start_index:]
811 814 return completion
812 815
813 816 completions = interpreter.completions()
814 817
815 818 completion_text = [c.name_with_symbols for c in completions]
816 819
817 820 if self.omit__names:
818 821 if self.omit__names == 1:
819 822 # true if txt is _not_ a __ name, false otherwise:
820 823 no__name = lambda txt: not txt.startswith('__')
821 824 else:
822 825 # true if txt is _not_ a _ name, false otherwise:
823 826 no__name = lambda txt: not txt.startswith('_')
824 827 completion_text = filter(no__name, completion_text)
825 828
826 829
827 830 return [trim_start(before + c_text) for c_text in completion_text]
828 831
829 832
830 833 def python_matches(self, text):
831 834 """Match attributes or global python names"""
832 835 # Jedi completion
833 836
834 837 if "." in text:
835 838 try:
836 839 matches = self.attr_matches(text)
837 840 if text.endswith('.') and self.omit__names:
838 841 if self.omit__names == 1:
839 842 # true if txt is _not_ a __ name, false otherwise:
840 843 no__name = (lambda txt:
841 844 re.match(r'.*\.__.*?__',txt) is None)
842 845 else:
843 846 # true if txt is _not_ a _ name, false otherwise:
844 847 no__name = (lambda txt:
845 848 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
846 849 matches = filter(no__name, matches)
847 850 except NameError:
848 851 # catches <undefined attributes>.<tab>
849 852 matches = []
850 853 else:
851 854 matches = self.global_matches(text)
852 855 return matches
853 856
854 857 def _default_arguments_from_docstring(self, doc):
855 858 """Parse the first line of docstring for call signature.
856 859
857 860 Docstring should be of the form 'min(iterable[, key=func])\n'.
858 861 It can also parse cython docstring of the form
859 862 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
860 863 """
861 864 if doc is None:
862 865 return []
863 866
864 867 # care only about the first line
865 868 line = doc.lstrip().splitlines()[0]
866 869
867 870 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
868 871 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
869 872 sig = self.docstring_sig_re.search(line)
870 873 if sig is None:
871 874 return []
872 875 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
873 876 sig = sig.groups()[0].split(',')
874 877 ret = []
875 878 for s in sig:
876 879 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
877 880 ret += self.docstring_kwd_re.findall(s)
878 881 return ret
879 882
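# Sketch of the docstring fallback used when introspection fails: keyword
# names are pulled from a signature-style first line. Building an IPCompleter
# without a shell here is purely for illustration.
from IPython.core.completer import IPCompleter

comp = IPCompleter(use_readline=False)
assert comp._default_arguments_from_docstring('min(iterable[, key=func])\n') == ['key']
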
880 883 def _default_arguments(self, obj):
881 884 """Return the list of default arguments of obj if it is callable,
882 885 or empty list otherwise."""
883 886 call_obj = obj
884 887 ret = []
885 888 if inspect.isbuiltin(obj):
886 889 pass
887 890 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
888 891 if inspect.isclass(obj):
889 892 # for cython embedsignature=True the constructor docstring
890 893 # belongs to the object itself, not __init__
891 894 ret += self._default_arguments_from_docstring(
892 895 getattr(obj, '__doc__', ''))
893 896 # for classes, check for __init__,__new__
894 897 call_obj = (getattr(obj, '__init__', None) or
895 898 getattr(obj, '__new__', None))
896 899 # for all others, check if they are __call__able
897 900 elif hasattr(obj, '__call__'):
898 901 call_obj = obj.__call__
899 902 ret += self._default_arguments_from_docstring(
900 903 getattr(call_obj, '__doc__', ''))
901 904
902 905 if PY3:
903 906 _keeps = (inspect.Parameter.KEYWORD_ONLY,
904 907 inspect.Parameter.POSITIONAL_OR_KEYWORD)
905 908 signature = inspect.signature
906 909 else:
907 910 import IPython.utils.signatures
908 911 _keeps = (IPython.utils.signatures.Parameter.KEYWORD_ONLY,
909 912 IPython.utils.signatures.Parameter.POSITIONAL_OR_KEYWORD)
910 913 signature = IPython.utils.signatures.signature
911 914
912 915 try:
913 916 sig = signature(call_obj)
914 917 ret.extend(k for k, v in sig.parameters.items() if
915 918 v.kind in _keeps)
916 919 except ValueError:
917 920 pass
918 921
919 922 return list(set(ret))
920 923
921 924 def python_func_kw_matches(self,text):
922 925 """Match named parameters (kwargs) of the last open function"""
923 926
924 927 if "." in text: # a parameter cannot be dotted
925 928 return []
926 929 try: regexp = self.__funcParamsRegex
927 930 except AttributeError:
928 931 regexp = self.__funcParamsRegex = re.compile(r'''
929 932 '.*?(?<!\\)' | # single quoted strings or
930 933 ".*?(?<!\\)" | # double quoted strings or
931 934 \w+ | # identifier
932 935 \S # other characters
933 936 ''', re.VERBOSE | re.DOTALL)
934 937 # 1. find the nearest identifier that comes before an unclosed
935 938 # parenthesis before the cursor
936 939 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
937 940 tokens = regexp.findall(self.text_until_cursor)
938 941 tokens.reverse()
939 942 iterTokens = iter(tokens); openPar = 0
940 943
941 944 for token in iterTokens:
942 945 if token == ')':
943 946 openPar -= 1
944 947 elif token == '(':
945 948 openPar += 1
946 949 if openPar > 0:
947 950 # found the last unclosed parenthesis
948 951 break
949 952 else:
950 953 return []
951 954 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
952 955 ids = []
953 956 isId = re.compile(r'\w+$').match
954 957
955 958 while True:
956 959 try:
957 960 ids.append(next(iterTokens))
958 961 if not isId(ids[-1]):
959 962 ids.pop(); break
960 963 if not next(iterTokens) == '.':
961 964 break
962 965 except StopIteration:
963 966 break
964 967 # lookup the candidate callable matches either using global_matches
965 968 # or attr_matches for dotted names
966 969 if len(ids) == 1:
967 970 callableMatches = self.global_matches(ids[0])
968 971 else:
969 972 callableMatches = self.attr_matches('.'.join(ids[::-1]))
970 973 argMatches = []
971 974 for callableMatch in callableMatches:
972 975 try:
973 976 namedArgs = self._default_arguments(eval(callableMatch,
974 977 self.namespace))
975 978 except:
976 979 continue
977 980
978 981 for namedArg in namedArgs:
979 982 if namedArg.startswith(text):
980 983 argMatches.append(u"%s=" %namedArg)
981 984 return argMatches
982 985
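# Sketch, assuming a running IPython session: keyword arguments of the call
# that is open at the cursor are offered with a trailing '='.
from IPython import get_ipython

ip = get_ipython()
ip.ex('def plot(amplitude=1, phase=0): pass')
text, matches = ip.Completer.complete(line_buffer='plot(am')
assert text == 'am' and 'amplitude=' in matches
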
983 986 def dict_key_matches(self, text):
984 987 "Match string keys in a dictionary, after e.g. 'foo[' "
985 988 def get_keys(obj):
986 989 # Objects can define their own completions by defining an
987 990 # _ipython_key_completions_() method.
988 991 method = get_real_method(obj, '_ipython_key_completions_')
989 992 if method is not None:
990 993 return method()
991 994
992 995 # Special case some common in-memory dict-like types
993 996 if isinstance(obj, dict) or\
994 997 _safe_isinstance(obj, 'pandas', 'DataFrame'):
995 998 try:
996 999 return list(obj.keys())
997 1000 except Exception:
998 1001 return []
999 1002 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
1000 1003 _safe_isinstance(obj, 'numpy', 'void'):
1001 1004 return obj.dtype.names or []
1002 1005 return []
1003 1006
1004 1007 try:
1005 1008 regexps = self.__dict_key_regexps
1006 1009 except AttributeError:
1007 1010 dict_key_re_fmt = r'''(?x)
1008 1011 ( # match dict-referring expression wrt greedy setting
1009 1012 %s
1010 1013 )
1011 1014 \[ # open bracket
1012 1015 \s* # and optional whitespace
1013 1016 ([uUbB]? # string prefix (r not handled)
1014 1017 (?: # unclosed string
1015 1018 '(?:[^']|(?<!\\)\\')*
1016 1019 |
1017 1020 "(?:[^"]|(?<!\\)\\")*
1018 1021 )
1019 1022 )?
1020 1023 $
1021 1024 '''
1022 1025 regexps = self.__dict_key_regexps = {
1023 1026 False: re.compile(dict_key_re_fmt % '''
1024 1027 # identifiers separated by .
1025 1028 (?!\d)\w+
1026 1029 (?:\.(?!\d)\w+)*
1027 1030 '''),
1028 1031 True: re.compile(dict_key_re_fmt % '''
1029 1032 .+
1030 1033 ''')
1031 1034 }
1032 1035
1033 1036 match = regexps[self.greedy].search(self.text_until_cursor)
1034 1037 if match is None:
1035 1038 return []
1036 1039
1037 1040 expr, prefix = match.groups()
1038 1041 try:
1039 1042 obj = eval(expr, self.namespace)
1040 1043 except Exception:
1041 1044 try:
1042 1045 obj = eval(expr, self.global_namespace)
1043 1046 except Exception:
1044 1047 return []
1045 1048
1046 1049 keys = get_keys(obj)
1047 1050 if not keys:
1048 1051 return keys
1049 1052 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
1050 1053 if not matches:
1051 1054 return matches
1052 1055
1053 1056 # get the cursor position of
1054 1057 # - the text being completed
1055 1058 # - the start of the key text
1056 1059 # - the start of the completion
1057 1060 text_start = len(self.text_until_cursor) - len(text)
1058 1061 if prefix:
1059 1062 key_start = match.start(2)
1060 1063 completion_start = key_start + token_offset
1061 1064 else:
1062 1065 key_start = completion_start = match.end()
1063 1066
1064 1067 # grab the leading prefix, to make sure all completions start with `text`
1065 1068 if text_start > key_start:
1066 1069 leading = ''
1067 1070 else:
1068 1071 leading = text[text_start:completion_start]
1069 1072
1070 1073 # the index of the `[` character
1071 1074 bracket_idx = match.end(1)
1072 1075
1073 1076 # append closing quote and bracket as appropriate
1074 1077 # this is *not* appropriate if the opening quote or bracket is outside
1075 1078 # the text given to this method
1076 1079 suf = ''
1077 1080 continuation = self.line_buffer[len(self.text_until_cursor):]
1078 1081 if key_start > text_start and closing_quote:
1079 1082 # quotes were opened inside text, maybe close them
1080 1083 if continuation.startswith(closing_quote):
1081 1084 continuation = continuation[len(closing_quote):]
1082 1085 else:
1083 1086 suf += closing_quote
1084 1087 if bracket_idx > text_start:
1085 1088 # brackets were opened inside text, maybe close them
1086 1089 if not continuation.startswith(']'):
1087 1090 suf += ']'
1088 1091
1089 1092 return [leading + k + suf for k in matches]
1090 1093
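# Sketch, assuming a running IPython session: string keys of a dict are
# completed after an opening bracket (numpy dtypes and pandas DataFrames get
# the same treatment via get_keys above).
from IPython import get_ipython

ip = get_ipython()
ip.ex("menu = {'spam': 1, 'eggs': 2}")
text, matches = ip.Completer.complete(line_buffer="menu['sp")
assert text == 'sp' and 'spam' in matches
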
1091 1094 def unicode_name_matches(self, text):
1092 1095 u"""Match Latex-like syntax for unicode characters base
1093 1096 on the name of the character.
1094 1097
1095 1098 This does \\GREEK SMALL LETTER ETA -> Ξ·
1096 1099
1097 1100 Works only on valid python 3 identifiers, or on combining characters that
1098 1101 will combine to form a valid identifier.
1099 1102
1100 1103 Used on Python 3 only.
1101 1104 """
1102 1105 slashpos = text.rfind('\\')
1103 1106 if slashpos > -1:
1104 1107 s = text[slashpos+1:]
1105 1108 try :
1106 1109 unic = unicodedata.lookup(s)
1107 1110 # allow combining chars
1108 1111 if ('a'+unic).isidentifier():
1109 1112 return '\\'+s,[unic]
1110 1113 except KeyError as e:
1111 1114 pass
1112 1115 return u'', []
1113 1116
1114 1117
1115 1118
1116 1119
1117 1120 def latex_matches(self, text):
1118 1121 u"""Match Latex syntax for unicode characters.
1119 1122
1120 1123 This does both \\alp -> \\alpha and \\alpha -> Ξ±
1121 1124
1122 1125 Used on Python 3 only.
1123 1126 """
1124 1127 slashpos = text.rfind('\\')
1125 1128 if slashpos > -1:
1126 1129 s = text[slashpos:]
1127 1130 if s in latex_symbols:
1128 1131 # Try to complete a full latex symbol to unicode
1129 1132 # \\alpha -> Ξ±
1130 1133 return s, [latex_symbols[s]]
1131 1134 else:
1132 1135 # If a user has partially typed a latex symbol, give them
1133 1136 # a full list of options \al -> [\aleph, \alpha]
1134 1137 matches = [k for k in latex_symbols if k.startswith(s)]
1135 1138 return s, matches
1136 1139 return u'', []
1137 1140
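# Sketch (Python 3) of the forward unicode/latex completions; an IPCompleter
# is built without a shell here purely for illustration.
from IPython.core.completer import IPCompleter

comp = IPCompleter(use_readline=False)
assert comp.unicode_name_matches('\\GREEK SMALL LETTER ETA') == \
    ('\\GREEK SMALL LETTER ETA', ['Ξ·'])
assert comp.latex_matches('\\alpha') == ('\\alpha', ['Ξ±'])
# A partial escape returns every candidate, e.g. '\\al' -> ['\\aleph', '\\alpha', ...]
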
1138 1141 def dispatch_custom_completer(self, text):
1139 1142 line = self.line_buffer
1140 1143 if not line.strip():
1141 1144 return None
1142 1145
1143 1146 # Create a little structure to pass all the relevant information about
1144 1147 # the current completion to any custom completer.
1145 1148 event = Bunch()
1146 1149 event.line = line
1147 1150 event.symbol = text
1148 1151 cmd = line.split(None,1)[0]
1149 1152 event.command = cmd
1150 1153 event.text_until_cursor = self.text_until_cursor
1151 1154
1152 1155 # for foo etc, try also to find completer for %foo
1153 1156 if not cmd.startswith(self.magic_escape):
1154 1157 try_magic = self.custom_completers.s_matches(
1155 1158 self.magic_escape + cmd)
1156 1159 else:
1157 1160 try_magic = []
1158 1161
1159 1162 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1160 1163 try_magic,
1161 1164 self.custom_completers.flat_matches(self.text_until_cursor)):
1162 1165 try:
1163 1166 res = c(event)
1164 1167 if res:
1165 1168 # first, try case sensitive match
1166 1169 withcase = [cast_unicode_py2(r) for r in res if r.startswith(text)]
1167 1170 if withcase:
1168 1171 return withcase
1169 1172 # if none, then case insensitive ones are ok too
1170 1173 text_low = text.lower()
1171 1174 return [cast_unicode_py2(r) for r in res if r.lower().startswith(text_low)]
1172 1175 except TryNext:
1173 1176 pass
1174 1177
1175 1178 return None
1176 1179
1177 1180 def complete(self, text=None, line_buffer=None, cursor_pos=None):
1178 1181 """Find completions for the given text and line context.
1179 1182
1180 1183 Note that both the text and the line_buffer are optional, but at least
1181 1184 one of them must be given.
1182 1185
1183 1186 Parameters
1184 1187 ----------
1185 1188 text : string, optional
1186 1189 Text to perform the completion on. If not given, the line buffer
1187 1190 is split using the instance's CompletionSplitter object.
1188 1191
1189 1192 line_buffer : string, optional
1190 1193 If not given, the completer attempts to obtain the current line
1191 1194 buffer via readline. This keyword allows clients which are
1192 1195 requesting for text completions in non-readline contexts to inform
1193 1196 the completer of the entire text.
1194 1197
1195 1198 cursor_pos : int, optional
1196 1199 Index of the cursor in the full line buffer. Should be provided by
1197 1200 remote frontends where kernel has no access to frontend state.
1198 1201
1199 1202 Returns
1200 1203 -------
1201 1204 text : str
1202 1205 Text that was actually used in the completion.
1203 1206
1204 1207 matches : list
1205 1208 A list of completion matches.
1206 1209 """
1207 1210 # if the cursor position isn't given, the only sane assumption we can
1208 1211 # make is that it's at the end of the line (the common case)
1209 1212 if cursor_pos is None:
1210 1213 cursor_pos = len(line_buffer) if text is None else len(text)
1211 1214
1212 1215 if PY3:
1213 1216
1214 1217 base_text = text if not line_buffer else line_buffer[:cursor_pos]
1215 1218 latex_text, latex_matches = self.latex_matches(base_text)
1216 1219 if latex_matches:
1217 1220 return latex_text, latex_matches
1218 1221 name_text = ''
1219 1222 name_matches = []
1220 1223 for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
1221 1224 name_text, name_matches = meth(base_text)
1222 1225 if name_text:
1223 1226 return name_text, name_matches
1224 1227
1225 1228 # if text is either None or an empty string, rely on the line buffer
1226 1229 if not text:
1227 1230 text = self.splitter.split_line(line_buffer, cursor_pos)
1228 1231
1229 1232 # If no line buffer is given, assume the input text is all there was
1230 1233 if line_buffer is None:
1231 1234 line_buffer = text
1232 1235
1233 1236 self.line_buffer = line_buffer
1234 1237 self.text_until_cursor = self.line_buffer[:cursor_pos]
1235 1238
1236 1239 # Start with a clean slate of completions
1237 1240 self.matches[:] = []
1238 1241 custom_res = self.dispatch_custom_completer(text)
1239 1242 if custom_res is not None:
1240 1243 # did custom completers produce something?
1241 1244 self.matches = custom_res
1242 1245 else:
1243 1246 # Extend the list of completions with the results of each
1244 1247 # matcher, so we return results to the user from all
1245 1248 # namespaces.
1246 1249 if self.merge_completions:
1247 1250 self.matches = []
1248 1251 for matcher in self.matchers:
1249 1252 try:
1250 1253 self.matches.extend(matcher(text))
1251 1254 except:
1252 1255 # Show the ugly traceback if the matcher causes an
1253 1256 # exception, but do NOT crash the kernel!
1254 1257 sys.excepthook(*sys.exc_info())
1255 1258 else:
1256 1259 for matcher in self.matchers:
1257 1260 self.matches = matcher(text)
1258 1261 if self.matches:
1259 1262 break
1260 1263 # FIXME: we should extend our api to return a dict with completions for
1261 1264 # different types of objects. The rlcomplete() method could then
1262 1265 # simply collapse the dict into a list for readline, but we'd have
1263 1266 # richer completion semantics in other environments.
1264 1267 if self.use_jedi_completions:
1265 1268 self.matches.extend(self.python_jedi_matches(text, line_buffer, cursor_pos))
1266 1269
1267 1270 self.matches = sorted(set(self.matches), key=completions_sorting_key)
1268 1271
1269 1272 return text, self.matches
1270 1273
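# Sketch of the frontend-facing entry point, assuming a running IPython
# session: give it the line buffer and cursor position and it returns the
# text it actually completed on plus the sorted, de-duplicated matches.
from IPython import get_ipython

ip = get_ipython()
ip.ex('answer = 42')
text, matches = ip.Completer.complete(line_buffer='print(answ')
assert text == 'answ'
# When jedi is installed (and use_jedi_completions is left at its default),
# `matches` will include 'answer'.
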
1271 1274 def rlcomplete(self, text, state):
1272 1275 """Return the state-th possible completion for 'text'.
1273 1276
1274 1277 This is called successively with state == 0, 1, 2, ... until it
1275 1278 returns None. The completion should begin with 'text'.
1276 1279
1277 1280 Parameters
1278 1281 ----------
1279 1282 text : string
1280 1283 Text to perform the completion on.
1281 1284
1282 1285 state : int
1283 1286 Counter used by readline.
1284 1287 """
1285 1288 if state==0:
1286 1289
1287 1290 self.line_buffer = line_buffer = self.readline.get_line_buffer()
1288 1291 cursor_pos = self.readline.get_endidx()
1289 1292
1290 1293 #io.rprint("\nRLCOMPLETE: %r %r %r" %
1291 1294 # (text, line_buffer, cursor_pos) ) # dbg
1292 1295
1293 1296 # if there is only a tab on a line with only whitespace, instead of
1294 1297 # the mostly useless 'do you want to see all million completions'
1295 1298 # message, just do the right thing and give the user his tab!
1296 1299 # Incidentally, this enables pasting of tabbed text from an editor
1297 1300 # (as long as autoindent is off).
1298 1301
1299 1302 # It should be noted that at least pyreadline still shows file
1300 1303 # completions - is there a way around it?
1301 1304
1302 1305 # don't apply this on 'dumb' terminals, such as emacs buffers, so
1303 1306 # we don't interfere with their own tab-completion mechanism.
1304 1307 if not (self.dumb_terminal or line_buffer.strip()):
1305 1308 self.readline.insert_text('\t')
1306 1309 sys.stdout.flush()
1307 1310 return None
1308 1311
1309 1312 # Note: debugging exceptions that may occur in completion is very
1310 1313 # tricky, because readline unconditionally silences them. So if
1311 1314 # during development you suspect a bug in the completion code, turn
1312 1315 # this flag on temporarily by uncommenting the second form (don't
1313 1316 # flip the value in the first line, as the '# dbg' marker can be
1314 1317 # automatically detected and is used elsewhere).
1315 1318 DEBUG = False
1316 1319 #DEBUG = True # dbg
1317 1320 if DEBUG:
1318 1321 try:
1319 1322 self.complete(text, line_buffer, cursor_pos)
1320 1323 except:
1321 1324 import traceback; traceback.print_exc()
1322 1325 else:
1323 1326 # The normal production version is here
1324 1327
1325 1328 # This method computes the self.matches array
1326 1329 self.complete(text, line_buffer, cursor_pos)
1327 1330
1328 1331 try:
1329 1332 return self.matches[state]
1330 1333 except IndexError:
1331 1334 return None
1332 1335
@@ -1,805 +1,805 b''
1 1 # encoding: utf-8
2 2 """Tests for the IPython tab-completion machinery."""
3 3
4 4 # Copyright (c) IPython Development Team.
5 5 # Distributed under the terms of the Modified BSD License.
6 6
7 7 import os
8 8 import sys
9 9 import unittest
10 10
11 11 from contextlib import contextmanager
12 12
13 13 import nose.tools as nt
14 14
15 15 from traitlets.config.loader import Config
16 16 from IPython import get_ipython
17 17 from IPython.core import completer
18 18 from IPython.external.decorators import knownfailureif
19 19 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
20 20 from IPython.utils.generics import complete_object
21 21 from IPython.utils.py3compat import string_types, unicode_type
22 22 from IPython.testing import decorators as dec
23 23
24 24 #-----------------------------------------------------------------------------
25 25 # Test functions
26 26 #-----------------------------------------------------------------------------
27 27
28 28 @contextmanager
29 29 def greedy_completion():
30 30 ip = get_ipython()
31 31 greedy_original = ip.Completer.greedy
32 32 try:
33 33 ip.Completer.greedy = True
34 34 yield
35 35 finally:
36 36 ip.Completer.greedy = greedy_original
37 37
38 38 def test_protect_filename():
39 39 pairs = [ ('abc','abc'),
40 40 (' abc',r'\ abc'),
41 41 ('a bc',r'a\ bc'),
42 42 ('a bc',r'a\ \ bc'),
43 43 (' bc',r'\ \ bc'),
44 44 ]
45 45 # On posix, we also protect parens and other special characters
46 46 if sys.platform != 'win32':
47 47 pairs.extend( [('a(bc',r'a\(bc'),
48 48 ('a)bc',r'a\)bc'),
49 49 ('a( )bc',r'a\(\ \)bc'),
50 50 ('a[1]bc', r'a\[1\]bc'),
51 51 ('a{1}bc', r'a\{1\}bc'),
52 52 ('a#bc', r'a\#bc'),
53 53 ('a?bc', r'a\?bc'),
54 54 ('a=bc', r'a\=bc'),
55 55 ('a\\bc', r'a\\bc'),
56 56 ('a|bc', r'a\|bc'),
57 57 ('a;bc', r'a\;bc'),
58 58 ('a:bc', r'a\:bc'),
59 59 ("a'bc", r"a\'bc"),
60 60 ('a*bc', r'a\*bc'),
61 61 ('a"bc', r'a\"bc'),
62 62 ('a^bc', r'a\^bc'),
63 63 ('a&bc', r'a\&bc'),
64 64 ] )
65 65 # run the actual tests
66 66 for s1, s2 in pairs:
67 67 s1p = completer.protect_filename(s1)
68 68 nt.assert_equal(s1p, s2)
69 69
70 70
71 71 def check_line_split(splitter, test_specs):
72 72 for part1, part2, split in test_specs:
73 73 cursor_pos = len(part1)
74 74 line = part1+part2
75 75 out = splitter.split_line(line, cursor_pos)
76 76 nt.assert_equal(out, split)
77 77
78 78
79 79 def test_line_split():
80 80 """Basic line splitter test with default specs."""
81 81 sp = completer.CompletionSplitter()
82 82 # The format of the test specs is: part1, part2, expected answer. Parts 1
83 83 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
84 84 # was at the end of part1. So an empty part2 represents someone hitting
85 85 # tab at the end of the line, the most common case.
86 86 t = [('run some/scrip', '', 'some/scrip'),
87 87 ('run scripts/er', 'ror.py foo', 'scripts/er'),
88 88 ('echo $HOM', '', 'HOM'),
89 89 ('print sys.pa', '', 'sys.pa'),
90 90 ('print(sys.pa', '', 'sys.pa'),
91 91 ("execfile('scripts/er", '', 'scripts/er'),
92 92 ('a[x.', '', 'x.'),
93 93 ('a[x.', 'y', 'x.'),
94 94 ('cd "some_file/', '', 'some_file/'),
95 95 ]
96 96 check_line_split(sp, t)
97 97 # Ensure splitting works OK with unicode by re-running the tests with
98 98 # all inputs turned into unicode
99 99 check_line_split(sp, [ map(unicode_type, p) for p in t] )
100 100
101 101
102 102 def test_custom_completion_error():
103 103 """Test that errors from custom attribute completers are silenced."""
104 104 ip = get_ipython()
105 105 class A(object): pass
106 106 ip.user_ns['a'] = A()
107 107
108 108 @complete_object.when_type(A)
109 109 def complete_A(a, existing_completions):
110 110 raise TypeError("this should be silenced")
111 111
112 112 ip.complete("a.")
113 113
114 114
115 115 def test_unicode_completions():
116 116 ip = get_ipython()
117 117 # Some strings that trigger different types of completion. Check them both
118 118 # in str and unicode forms
119 119 s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
120 120 for t in s + list(map(unicode_type, s)):
121 121 # We don't need to check exact completion values (they may change
122 122 # depending on the state of the namespace, but at least no exceptions
123 123 # should be thrown and the return value should be a pair of text, list
124 124 # values.
125 125 text, matches = ip.complete(t)
126 126 nt.assert_true(isinstance(text, string_types))
127 127 nt.assert_true(isinstance(matches, list))
128 128
129 129 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
130 130 def test_latex_completions():
131 131 from IPython.core.latex_symbols import latex_symbols
132 132 import random
133 133 ip = get_ipython()
134 134 # Test some random unicode symbols
135 135 keys = random.sample(latex_symbols.keys(), 10)
136 136 for k in keys:
137 137 text, matches = ip.complete(k)
138 138 nt.assert_equal(len(matches),1)
139 139 nt.assert_equal(text, k)
140 140 nt.assert_equal(matches[0], latex_symbols[k])
141 141 # Test a more complex line
142 142 text, matches = ip.complete(u'print(\\alpha')
143 143 nt.assert_equals(text, u'\\alpha')
144 144 nt.assert_equals(matches[0], latex_symbols['\\alpha'])
145 145 # Test multiple matching latex symbols
146 146 text, matches = ip.complete(u'\\al')
147 147 nt.assert_in('\\alpha', matches)
148 148 nt.assert_in('\\aleph', matches)
149 149
150 150
151 151
152 152
153 153 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on python3')
154 154 def test_back_latex_completion():
155 155 ip = get_ipython()
156 156
157 157 # do not return more than 1 match for \beta, only the latex one.
158 158 name, matches = ip.complete('\\Ξ²')
159 159 nt.assert_equal(len(matches), 1)
160 160 nt.assert_equal(matches[0], '\\beta')
161 161
162 162 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on python3')
163 163 def test_back_unicode_completion():
164 164 ip = get_ipython()
165 165
166 166 name, matches = ip.complete('\\β…€')
167 167 nt.assert_equal(len(matches), 1)
168 168 nt.assert_equal(matches[0], '\\ROMAN NUMERAL FIVE')
169 169
170 170
171 171 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on python3')
172 172 def test_forward_unicode_completion():
173 173 ip = get_ipython()
174 174
175 175 name, matches = ip.complete('\\ROMAN NUMERAL FIVE')
176 176 nt.assert_equal(len(matches), 1)
177 177 nt.assert_equal(matches[0], 'β…€')
178 178
179 179 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on python3')
180 180 def test_no_ascii_back_completion():
181 181 ip = get_ipython()
182 182 with TemporaryWorkingDirectory(): # Avoid any filename completions
183 183 # single ascii letters that don't have completions yet
184 184 for letter in 'jJ' :
185 185 name, matches = ip.complete('\\'+letter)
186 186 nt.assert_equal(matches, [])
187 187
188 188
189 189
190 190
191 191 class CompletionSplitterTestCase(unittest.TestCase):
192 192 def setUp(self):
193 193 self.sp = completer.CompletionSplitter()
194 194
195 195 def test_delim_setting(self):
196 196 self.sp.delims = ' '
197 197 nt.assert_equal(self.sp.delims, ' ')
198 198 nt.assert_equal(self.sp._delim_expr, '[\ ]')
199 199
200 200 def test_spaces(self):
201 201 """Test with only spaces as split chars."""
202 202 self.sp.delims = ' '
203 203 t = [('foo', '', 'foo'),
204 204 ('run foo', '', 'foo'),
205 205 ('run foo', 'bar', 'foo'),
206 206 ]
207 207 check_line_split(self.sp, t)
208 208
209 209
210 210 def test_has_open_quotes1():
211 211 for s in ["'", "'''", "'hi' '"]:
212 212 nt.assert_equal(completer.has_open_quotes(s), "'")
213 213
214 214
215 215 def test_has_open_quotes2():
216 216 for s in ['"', '"""', '"hi" "']:
217 217 nt.assert_equal(completer.has_open_quotes(s), '"')
218 218
219 219
220 220 def test_has_open_quotes3():
221 221 for s in ["''", "''' '''", "'hi' 'ipython'"]:
222 222 nt.assert_false(completer.has_open_quotes(s))
223 223
224 224
225 225 def test_has_open_quotes4():
226 226 for s in ['""', '""" """', '"hi" "ipython"']:
227 227 nt.assert_false(completer.has_open_quotes(s))
228 228
229 229
230 230 @knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
231 231 def test_abspath_file_completions():
232 232 ip = get_ipython()
233 233 with TemporaryDirectory() as tmpdir:
234 234 prefix = os.path.join(tmpdir, 'foo')
235 235 suffixes = ['1', '2']
236 236 names = [prefix+s for s in suffixes]
237 237 for n in names:
238 238 open(n, 'w').close()
239 239
240 240 # Check simple completion
241 241 c = ip.complete(prefix)[1]
242 242 nt.assert_equal(c, names)
243 243
244 244 # Now check with a function call
245 245 cmd = 'a = f("%s' % prefix
246 246 c = ip.complete(prefix, cmd)[1]
247 247 comp = [prefix+s for s in suffixes]
248 248 nt.assert_equal(c, comp)
249 249
250 250
251 251 def test_local_file_completions():
252 252 ip = get_ipython()
253 253 with TemporaryWorkingDirectory():
254 254 prefix = './foo'
255 255 suffixes = ['1', '2']
256 256 names = [prefix+s for s in suffixes]
257 257 for n in names:
258 258 open(n, 'w').close()
259 259
260 260 # Check simple completion
261 261 c = ip.complete(prefix)[1]
262 262 nt.assert_equal(c, names)
263 263
264 264 # Now check with a function call
265 265 cmd = 'a = f("%s' % prefix
266 266 c = ip.complete(prefix, cmd)[1]
267 267 comp = set(prefix+s for s in suffixes)
268 268 nt.assert_true(comp.issubset(set(c)))
269 269
270 270
271 271 def test_greedy_completions():
272 272 ip = get_ipython()
273 273 ip.ex('a=list(range(5))')
274 274 _,c = ip.complete('.',line='a[0].')
275 275 nt.assert_false('.real' in c,
276 276 "Shouldn't have completed on a[0]: %s"%c)
277 277 with greedy_completion():
278 278 def _(line, cursor_pos, expect, message):
279 279 _,c = ip.complete('.', line=line, cursor_pos=cursor_pos)
280 280 nt.assert_in(expect, c, message%c)
281 281
282 282 yield _, 'a[0].', 5, '.real', "Should have completed on a[0].: %s"
283 283 yield _, 'a[0].r', 6, '.real', "Should have completed on a[0].r: %s"
284 284
285 285 if sys.version_info > (3,4):
286 286 yield _, 'a[0].from_', 10, '.from_bytes', "Should have completed on a[0].from_: %s"
287 287
288 288
289 289 def _2():
290 290 # jedi bug: this will be empty, so make it fail for now;
291 291 # once jedi is fixed, switch to assert_in
292 292 # https://github.com/davidhalter/jedi/issues/718
293 293 _,c = ip.complete('.',line='a[0].from', cursor_pos=9)
294 294 nt.assert_not_in('.from_bytes', c, "Should not have completed on a[0].from (jedi bug), if fails, update test to assert_in: %s"%c)
295 295 yield _2
296 296
297 297
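# The greedy_completion() helper used above is defined earlier in this test
# module; the sketch below (an assumption under a hypothetical name, not the
# exact implementation) shows the idea: temporarily enable Completer.greedy,
# which lets the completer evaluate the text left of the final '.' so that
# attribute completion works on expressions such as a[0].
from contextlib import contextmanager

@contextmanager
def _greedy_completion_sketch():
    ip = get_ipython()
    original = ip.Completer.greedy      # remember the current setting
    try:
        ip.Completer.greedy = True      # evaluate the object left of the dot
        yield
    finally:
        ip.Completer.greedy = original  # always restore the previous mode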
298 298
299 299 def test_omit__names():
300 300 # also happens to test IPCompleter as a configurable
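# omit__names levels (as exercised below): 0 shows everything, 1 hides
# __dunder__ names, 2 hides every name starting with an underscore.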
301 301 ip = get_ipython()
302 302 ip._hidden_attr = 1
303 303 ip._x = {}
304 304 c = ip.Completer
305 305 ip.ex('ip=get_ipython()')
306 306 cfg = Config()
307 307 cfg.IPCompleter.omit__names = 0
308 308 c.update_config(cfg)
309 309 s,matches = c.complete('ip.')
310 310 nt.assert_in('ip.__str__', matches)
311 311 nt.assert_in('ip._hidden_attr', matches)
312 312 cfg = Config()
313 313 cfg.IPCompleter.omit__names = 1
314 314 c.update_config(cfg)
315 315 s,matches = c.complete('ip.')
316 316 nt.assert_not_in('ip.__str__', matches)
317 317 nt.assert_in('ip._hidden_attr', matches)
318 318 cfg = Config()
319 319 cfg.IPCompleter.omit__names = 2
320 320 c.update_config(cfg)
321 321 s,matches = c.complete('ip.')
322 322 nt.assert_not_in('ip.__str__', matches)
323 323 nt.assert_not_in('ip._hidden_attr', matches)
324 324 s,matches = c.complete('ip._x.')
325 325 nt.assert_in('ip._x.keys', matches)
326 326 del ip._hidden_attr
327 327
328 328
329 329 def test_limit_to__all__False_ok():
330 330 ip = get_ipython()
331 331 c = ip.Completer
332 332 ip.ex('class D: x=24')
333 333 ip.ex('d=D()')
334 334 cfg = Config()
335 335 cfg.IPCompleter.limit_to__all__ = False
336 336 c.update_config(cfg)
337 337 s, matches = c.complete('d.')
338 338 nt.assert_in('d.x', matches)
339 339
340 340
341 341 def test_get__all__entries_ok():
342 342 class A(object):
343 343 __all__ = ['x', 1]
344 344 words = completer.get__all__entries(A())
345 345 nt.assert_equal(words, ['x'])
346 346
347 347
348 348 def test_get__all__entries_no__all__ok():
349 349 class A(object):
350 350 pass
351 351 words = completer.get__all__entries(A())
352 352 nt.assert_equal(words, [])
353 353
354 354
355 355 def test_func_kw_completions():
356 356 ip = get_ipython()
357 357 c = ip.Completer
358 358 ip.ex('def myfunc(a=1,b=2): return a+b')
359 359 s, matches = c.complete(None, 'myfunc(1,b')
360 360 nt.assert_in('b=', matches)
361 361 # Simulate completing with cursor right after b (pos==10):
362 362 s, matches = c.complete(None, 'myfunc(1,b)', 10)
363 363 nt.assert_in('b=', matches)
364 364 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
365 365 nt.assert_in('b=', matches)
366 366 # builtin function
367 367 s, matches = c.complete(None, 'min(k, k')
368 368 nt.assert_in('key=', matches)
369 369
370 370
371 371 def test_default_arguments_from_docstring():
372 372 ip = get_ipython()
373 373 c = ip.Completer
374 374 kwd = c._default_arguments_from_docstring(
375 375 'min(iterable[, key=func]) -> value')
376 376 nt.assert_equal(kwd, ['key'])
377 377 # with cython types etc.
378 378 kwd = c._default_arguments_from_docstring(
379 379 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
380 380 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
381 381 # leading whitespace
382 382 kwd = c._default_arguments_from_docstring(
383 383 '\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
384 384 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
385 385
386 386 def test_line_magics():
387 387 ip = get_ipython()
388 388 c = ip.Completer
389 389 s, matches = c.complete(None, 'lsmag')
390 390 nt.assert_in('%lsmagic', matches)
391 391 s, matches = c.complete(None, '%lsmag')
392 392 nt.assert_in('%lsmagic', matches)
393 393
394 394
395 395 def test_cell_magics():
396 396 from IPython.core.magic import register_cell_magic
397 397
398 398 @register_cell_magic
399 399 def _foo_cellm(line, cell):
400 400 pass
401 401
402 402 ip = get_ipython()
403 403 c = ip.Completer
404 404
405 405 s, matches = c.complete(None, '_foo_ce')
406 406 nt.assert_in('%%_foo_cellm', matches)
407 407 s, matches = c.complete(None, '%%_foo_ce')
408 408 nt.assert_in('%%_foo_cellm', matches)
409 409
410 410
411 411 def test_line_cell_magics():
412 412 from IPython.core.magic import register_line_cell_magic
413 413
414 414 @register_line_cell_magic
415 415 def _bar_cellm(line, cell):
416 416 pass
417 417
418 418 ip = get_ipython()
419 419 c = ip.Completer
420 420
421 421 # The policy here is trickier; see comments in the completion code. The
422 422 # returned values depend on whether the user passes %% or not explicitly,
423 423 # and this will show a difference if the same name is both a line and cell
424 424 # magic.
425 425 s, matches = c.complete(None, '_bar_ce')
426 426 nt.assert_in('%_bar_cellm', matches)
427 427 nt.assert_in('%%_bar_cellm', matches)
428 428 s, matches = c.complete(None, '%_bar_ce')
429 429 nt.assert_in('%_bar_cellm', matches)
430 430 nt.assert_in('%%_bar_cellm', matches)
431 431 s, matches = c.complete(None, '%%_bar_ce')
432 432 nt.assert_not_in('%_bar_cellm', matches)
433 433 nt.assert_in('%%_bar_cellm', matches)
434 434
435 435
436 436 def test_magic_completion_order():
437 437
438 438 ip = get_ipython()
439 439 c = ip.Completer
440 440
441 441 # Test ordering of magics and non-magics with the same name
442 442 # We want the non-magic first
443 443
444 444 # Before the name 'matplotlib' exists in the user namespace, there should be only one option:
445 445
446 446 text, matches = c.complete('mat')
447 447 nt.assert_equal(matches, ["%matplotlib"])
448 448
449 449
450 450 ip.run_cell("matplotlib = 1") # introduce name into namespace
451 451
452 452 # After defining the name, there should be two options, ordered like this:
453 453 text, matches = c.complete('mat')
454 454 nt.assert_equal(matches, ["matplotlib", "%matplotlib"])
455 455
456 456
457 457 ip.run_cell("timeit = 1") # define a user variable called 'timeit'
458 458
459 459 # Order of user variable and line and cell magics with same name:
460 460 text, matches = c.complete('timeit')
461 461 nt.assert_equal(matches, ["timeit", "%timeit","%%timeit"])
462 462
463 463
464 464 def test_dict_key_completion_string():
465 465 """Test dictionary key completion for string keys"""
466 466 ip = get_ipython()
467 467 complete = ip.Completer.complete
468 468
469 469 ip.user_ns['d'] = {'abc': None}
470 470
471 471 # check completion at different stages
472 472 _, matches = complete(line_buffer="d[")
473 473 nt.assert_in("'abc'", matches)
474 474 nt.assert_not_in("'abc']", matches)
475 475
476 476 _, matches = complete(line_buffer="d['")
477 477 nt.assert_in("abc", matches)
478 478 nt.assert_not_in("abc']", matches)
479 479
480 480 _, matches = complete(line_buffer="d['a")
481 481 nt.assert_in("abc", matches)
482 482 nt.assert_not_in("abc']", matches)
483 483
484 484 # check use of different quoting
485 485 _, matches = complete(line_buffer="d[\"")
486 486 nt.assert_in("abc", matches)
487 487 nt.assert_not_in('abc\"]', matches)
488 488
489 489 _, matches = complete(line_buffer="d[\"a")
490 490 nt.assert_in("abc", matches)
491 491 nt.assert_not_in('abc\"]', matches)
492 492
493 493 # check sensitivity to following context
494 494 _, matches = complete(line_buffer="d[]", cursor_pos=2)
495 495 nt.assert_in("'abc'", matches)
496 496
497 497 _, matches = complete(line_buffer="d['']", cursor_pos=3)
498 498 nt.assert_in("abc", matches)
499 499 nt.assert_not_in("abc'", matches)
500 500 nt.assert_not_in("abc']", matches)
501 501
502 502 # check that multiple solutions are correctly returned and that noise is not
503 503 ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,
504 504 5: None}
505 505
506 506 _, matches = complete(line_buffer="d['a")
507 507 nt.assert_in("abc", matches)
508 508 nt.assert_in("abd", matches)
509 509 nt.assert_not_in("bad", matches)
510 510 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
511 511
512 512 # check escaping and whitespace
513 513 ip.user_ns['d'] = {'a\nb': None, 'a\'b': None, 'a"b': None, 'a word': None}
514 514 _, matches = complete(line_buffer="d['a")
515 515 nt.assert_in("a\\nb", matches)
516 516 nt.assert_in("a\\'b", matches)
517 517 nt.assert_in("a\"b", matches)
518 518 nt.assert_in("a word", matches)
519 519 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
520 520
521 521 # - can complete on a non-initial word of the string
522 522 _, matches = complete(line_buffer="d['a w")
523 523 nt.assert_in("word", matches)
524 524
525 525 # - understands quote escaping
526 526 _, matches = complete(line_buffer="d['a\\'")
527 527 nt.assert_in("b", matches)
528 528
529 529 # - default quoting should work like repr
530 530 _, matches = complete(line_buffer="d[")
531 531 nt.assert_in("\"a'b\"", matches)
532 532
533 533 # - when opening quote with ", possible to match with unescaped apostrophe
534 534 _, matches = complete(line_buffer="d[\"a'")
535 535 nt.assert_in("b", matches)
536 536
537 537 # the splitter must not break on delims that readline won't split at
538 538 if '-' not in ip.Completer.splitter.delims:
539 539 ip.user_ns['d'] = {'before-after': None}
540 540 _, matches = complete(line_buffer="d['before-af")
541 541 nt.assert_in('before-after', matches)
542 542
543 543 def test_dict_key_completion_contexts():
544 544 """Test expression contexts in which dict key completion occurs"""
545 545 ip = get_ipython()
546 546 complete = ip.Completer.complete
547 547 d = {'abc': None}
548 548 ip.user_ns['d'] = d
549 549
550 550 class C:
551 551 data = d
552 552 ip.user_ns['C'] = C
553 553 ip.user_ns['get'] = lambda: d
554 554
555 555 def assert_no_completion(**kwargs):
556 556 _, matches = complete(**kwargs)
557 557 nt.assert_not_in('abc', matches)
558 558 nt.assert_not_in('abc\'', matches)
559 559 nt.assert_not_in('abc\']', matches)
560 560 nt.assert_not_in('\'abc\'', matches)
561 561 nt.assert_not_in('\'abc\']', matches)
562 562
563 563 def assert_completion(**kwargs):
564 564 _, matches = complete(**kwargs)
565 565 nt.assert_in("'abc'", matches)
566 566 nt.assert_not_in("'abc']", matches)
567 567
568 568 # no completion after string closed, even if reopened
569 569 assert_no_completion(line_buffer="d['a'")
570 570 assert_no_completion(line_buffer="d[\"a\"")
571 571 assert_no_completion(line_buffer="d['a' + ")
572 572 assert_no_completion(line_buffer="d['a' + '")
573 573
574 574 # completion in non-trivial expressions
575 575 assert_completion(line_buffer="+ d[")
576 576 assert_completion(line_buffer="(d[")
577 577 assert_completion(line_buffer="C.data[")
578 578
579 579 # greedy flag
580 580 def assert_completion(**kwargs):
581 581 _, matches = complete(**kwargs)
582 582 nt.assert_in("get()['abc']", matches)
583 583
584 584 assert_no_completion(line_buffer="get()[")
585 585 with greedy_completion():
586 586 assert_completion(line_buffer="get()[")
587 587 assert_completion(line_buffer="get()['")
588 588 assert_completion(line_buffer="get()['a")
589 589 assert_completion(line_buffer="get()['ab")
590 590 assert_completion(line_buffer="get()['abc")
591 591
592 592
593 593
594 594 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
595 595 def test_dict_key_completion_bytes():
596 596 """Test handling of bytes in dict key completion"""
597 597 ip = get_ipython()
598 598 complete = ip.Completer.complete
599 599
600 600 ip.user_ns['d'] = {'abc': None, b'abd': None}
601 601
602 602 _, matches = complete(line_buffer="d[")
603 603 nt.assert_in("'abc'", matches)
604 604 nt.assert_in("b'abd'", matches)
605 605
606 606 if False: # not currently implemented
607 607 _, matches = complete(line_buffer="d[b")
608 608 nt.assert_in("b'abd'", matches)
609 609 nt.assert_not_in("b'abc'", matches)
610 610
611 611 _, matches = complete(line_buffer="d[b'")
612 612 nt.assert_in("abd", matches)
613 613 nt.assert_not_in("abc", matches)
614 614
615 615 _, matches = complete(line_buffer="d[B'")
616 616 nt.assert_in("abd", matches)
617 617 nt.assert_not_in("abc", matches)
618 618
619 619 _, matches = complete(line_buffer="d['")
620 620 nt.assert_in("abc", matches)
621 621 nt.assert_not_in("abd", matches)
622 622
623 623
624 624 @dec.onlyif(sys.version_info[0] < 3, 'This test only applies in Py<3')
625 625 def test_dict_key_completion_unicode_py2():
626 626 """Test handling of unicode in dict key completion"""
627 627 ip = get_ipython()
628 628 complete = ip.Completer.complete
629 629
630 630 ip.user_ns['d'] = {u'abc': None,
631 631 u'a\u05d0b': None}
632 632
633 633 _, matches = complete(line_buffer="d[")
634 634 nt.assert_in("u'abc'", matches)
635 635 nt.assert_in("u'a\\u05d0b'", matches)
636 636
637 637 _, matches = complete(line_buffer="d['a")
638 638 nt.assert_in("abc", matches)
639 639 nt.assert_not_in("a\\u05d0b", matches)
640 640
641 641 _, matches = complete(line_buffer="d[u'a")
642 642 nt.assert_in("abc", matches)
643 643 nt.assert_in("a\\u05d0b", matches)
644 644
645 645 _, matches = complete(line_buffer="d[U'a")
646 646 nt.assert_in("abc", matches)
647 647 nt.assert_in("a\\u05d0b", matches)
648 648
649 649 # query using escape
650 650 _, matches = complete(line_buffer=u"d[u'a\\u05d0")
651 651 nt.assert_in("u05d0b", matches) # tokenized after \\
652 652
653 653 # query using character
654 654 _, matches = complete(line_buffer=u"d[u'a\u05d0")
655 655 nt.assert_in(u"a\u05d0b", matches)
656 656
657 657 with greedy_completion():
658 658 _, matches = complete(line_buffer="d[")
659 659 nt.assert_in("d[u'abc']", matches)
660 660 nt.assert_in("d[u'a\\u05d0b']", matches)
661 661
662 662 _, matches = complete(line_buffer="d['a")
663 663 nt.assert_in("d['abc']", matches)
664 664 nt.assert_not_in("d[u'a\\u05d0b']", matches)
665 665
666 666 _, matches = complete(line_buffer="d[u'a")
667 667 nt.assert_in("d[u'abc']", matches)
668 668 nt.assert_in("d[u'a\\u05d0b']", matches)
669 669
670 670 _, matches = complete(line_buffer="d[U'a")
671 671 nt.assert_in("d[U'abc']", matches)
672 672 nt.assert_in("d[U'a\\u05d0b']", matches)
673 673
674 674 # query using escape
675 675 _, matches = complete(line_buffer=u"d[u'a\\u05d0")
676 676 nt.assert_in("d[u'a\\u05d0b']", matches) # tokenized after \\
677 677
678 678 # query using character
679 679 _, matches = complete(line_buffer=u"d[u'a\u05d0")
680 680 nt.assert_in(u"d[u'a\u05d0b']", matches)
681 681
682 682
683 683 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
684 684 def test_dict_key_completion_unicode_py3():
685 685 """Test handling of unicode in dict key completion"""
686 686 ip = get_ipython()
687 687 complete = ip.Completer.complete
688 688
689 689 ip.user_ns['d'] = {u'a\u05d0': None}
690 690
691 691 # query using escape
692 692 _, matches = complete(line_buffer="d['a\\u05d0")
693 693 nt.assert_in("u05d0", matches) # tokenized after \\
694 694
695 695 # query using character
696 696 _, matches = complete(line_buffer="d['a\u05d0")
697 697 nt.assert_in(u"a\u05d0", matches)
698 698
699 699 with greedy_completion():
700 700 # query using escape
701 701 _, matches = complete(line_buffer="d['a\\u05d0")
702 702 nt.assert_in("d['a\\u05d0']", matches) # tokenized after \\
703 703
704 704 # query using character
705 705 _, matches = complete(line_buffer="d['a\u05d0")
706 706 nt.assert_in(u"d['a\u05d0']", matches)
707 707
708 708
709 709
710 710 @dec.skip_without('numpy')
711 711 def test_struct_array_key_completion():
712 712 """Test dict key completion applies to numpy struct arrays"""
713 713 import numpy
714 714 ip = get_ipython()
715 715 complete = ip.Completer.complete
716 716 ip.user_ns['d'] = numpy.array([], dtype=[('hello', 'f'), ('world', 'f')])
717 717 _, matches = complete(line_buffer="d['")
718 718 nt.assert_in("hello", matches)
719 719 nt.assert_in("world", matches)
720 720 # complete on the numpy struct itself
721 721 dt = numpy.dtype([('my_head', [('my_dt', '>u4'), ('my_df', '>u4')]),
722 722 ('my_data', '>f4', 5)])
723 723 x = numpy.zeros(2, dtype=dt)
724 724 ip.user_ns['d'] = x[1]
725 725 _, matches = complete(line_buffer="d['")
726 726 nt.assert_in("my_head", matches)
727 727 nt.assert_in("my_data", matches)
728 728 # complete on a nested level
729 729 with greedy_completion():
730 730 ip.user_ns['d'] = numpy.zeros(2, dtype=dt)
731 731 _, matches = complete(line_buffer="d[1]['my_head']['")
732 732 nt.assert_true(any(["my_dt" in m for m in matches]))
733 733 nt.assert_true(any(["my_df" in m for m in matches]))
734 734
735 735
736 736 @dec.skip_without('pandas')
737 737 def test_dataframe_key_completion():
738 738 """Test dict key completion applies to pandas DataFrames"""
739 739 import pandas
740 740 ip = get_ipython()
741 741 complete = ip.Completer.complete
742 742 ip.user_ns['d'] = pandas.DataFrame({'hello': [1], 'world': [2]})
743 743 _, matches = complete(line_buffer="d['")
744 744 nt.assert_in("hello", matches)
745 745 nt.assert_in("world", matches)
746 746
747 747
748 748 def test_dict_key_completion_invalids():
749 749 """Smoke test cases dict key completion can't handle"""
750 750 ip = get_ipython()
751 751 complete = ip.Completer.complete
752 752
753 753 ip.user_ns['no_getitem'] = None
754 754 ip.user_ns['no_keys'] = []
755 755 ip.user_ns['cant_call_keys'] = dict
756 756 ip.user_ns['empty'] = {}
757 757 ip.user_ns['d'] = {'abc': 5}
758 758
759 759 _, matches = complete(line_buffer="no_getitem['")
760 760 _, matches = complete(line_buffer="no_keys['")
761 761 _, matches = complete(line_buffer="cant_call_keys['")
762 762 _, matches = complete(line_buffer="empty['")
763 763 _, matches = complete(line_buffer="name_error['")
764 764 _, matches = complete(line_buffer="d['\\") # incomplete escape
765 765
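# Objects may implement the _ipython_key_completions_ hook: when completing
# obj['<tab>, the completer calls it and offers whatever keys it returns.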
766 766 class KeyCompletable(object):
767 767 def __init__(self, things=()):
768 768 self.things = things
769 769
770 770 def _ipython_key_completions_(self):
771 771 return list(self.things)
772 772
773 773 def test_object_key_completion():
774 774 ip = get_ipython()
775 775 ip.user_ns['key_completable'] = KeyCompletable(['qwerty', 'qwick'])
776 776
777 777 _, matches = ip.Completer.complete(line_buffer="key_completable['qw")
778 778 nt.assert_in('qwerty', matches)
779 779 nt.assert_in('qwick', matches)
780 780
781 781
782 782 def test_aimport_module_completer():
783 783 ip = get_ipython()
784 784 _, matches = ip.complete('i', '%aimport i')
785 785 nt.assert_in('io', matches)
786 786 nt.assert_not_in('int', matches)
787 787
788 788 def test_nested_import_module_completer():
789 789 ip = get_ipython()
790 790 _, matches = ip.complete(None, 'import IPython.co', 17)
791 791 nt.assert_in('IPython.core', matches)
792 792 nt.assert_not_in('import IPython.core', matches)
793 793 nt.assert_not_in('IPython.display', matches)
794 794
795 795 def test_import_module_completer():
796 796 ip = get_ipython()
797 797 _, matches = ip.complete('i', 'import i')
798 798 nt.assert_in('io', matches)
799 799 nt.assert_not_in('int', matches)
800 800
801 801 def test_from_module_completer():
802 802 ip = get_ipython()
803 _, matches = ip.complete('B', 'from io import B')
803 _, matches = ip.complete('B', 'from io import B', 16)
804 804 nt.assert_in('BytesIO', matches)
805 805 nt.assert_not_in('BaseException', matches)