Allow objects to define their own key completions...
Thomas Kluyver -
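This change lets an object volunteer its own key completions: on tab after "obj[", the completer now looks for an _ipy_key_completions_() method (guarded by the new _safe_really_hasattr helper) before falling back to its built-in handling of dicts, pandas DataFrames and numpy structured arrays. A minimal sketch of an object opting in to the hook; the class and key names are illustrative, only the _ipy_key_completions_ method name comes from this patch:

    class LazyStore(object):
        """Hypothetical container that knows its keys without loading values."""
        def __init__(self, keys):
            self._keys = list(keys)

        def __getitem__(self, key):
            # real code would load the value lazily here
            return 'value for %s' % key

        def _ipy_key_completions_(self):
            # called by IPCompleter.dict_key_matches after e.g. "store['"
            return self._keys

    store = LazyStore(['alpha', 'beta'])
    # At an IPython prompt, store['<TAB> would then offer 'alpha' and 'beta'.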
@@ -1,1272 +1,1289 b''
1 1 # encoding: utf-8
2 2 """Word completion for IPython.
3 3
4 4 This module is a fork of the rlcompleter module in the Python standard
5 5 library. The original enhancements made to rlcompleter have been sent
6 6 upstream and were accepted as of Python 2.3, but we need a lot more
7 7 functionality specific to IPython, so this module will continue to live as an
8 8 IPython-specific utility.
9 9
10 10 Original rlcompleter documentation:
11 11
12 12 This requires the latest extension to the readline module. The completer
13 13 completes keywords, built-ins and globals in __main__; when completing
14 14 NAME.NAME..., it evaluates (!) the expression up to the last dot and
15 15 completes its attributes.
16 16
17 17 It's very cool to do "import string", type "string.", hit the
18 18 completion key (twice), and see the list of names defined by the
19 19 string module!
20 20
21 21 Tip: to use the tab key as the completion key, call
22 22
23 23 readline.parse_and_bind("tab: complete")
24 24
25 25 Notes:
26 26
27 27 - Exceptions raised by the completer function are *ignored* (and
28 28 generally cause the completion to fail). This is a feature -- since
29 29 readline sets the tty device in raw (or cbreak) mode, printing a
30 30 traceback wouldn't work well without some complicated hoopla to save,
31 31 reset and restore the tty state.
32 32
33 33 - The evaluation of the NAME.NAME... form may cause arbitrary
34 34 application defined code to be executed if an object with a
35 35 ``__getattr__`` hook is found. Since it is the responsibility of the
36 36 application (or the user) to enable this feature, I consider this an
37 37 acceptable risk. More complicated expressions (e.g. function calls or
38 38 indexing operations) are *not* evaluated.
39 39
40 40 - GNU readline is also used by the built-in functions input() and
41 41 raw_input(), and thus these also benefit/suffer from the completer
42 42 features. Clearly an interactive application can benefit by
43 43 specifying its own completer function and using raw_input() for all
44 44 its input.
45 45
46 46 - When the original stdin is not a tty device, GNU readline is never
47 47 used, and this module (and the readline module) are silently inactive.
48 48 """
49 49
50 50 # Copyright (c) IPython Development Team.
51 51 # Distributed under the terms of the Modified BSD License.
52 52 #
53 53 # Some of this code originated from rlcompleter in the Python standard library
54 54 # Copyright (C) 2001 Python Software Foundation, www.python.org
55 55
56 56 import __main__
57 57 import glob
58 58 import inspect
59 59 import itertools
60 60 import keyword
61 61 import os
62 62 import re
63 63 import sys
64 64 import unicodedata
65 65 import string
66 66
67 67 from traitlets.config.configurable import Configurable
68 68 from IPython.core.error import TryNext
69 69 from IPython.core.inputsplitter import ESC_MAGIC
70 70 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
71 71 from IPython.utils import generics
72 72 from IPython.utils import io
73 73 from IPython.utils.decorators import undoc
74 from IPython.utils.dir2 import dir2
74 from IPython.utils.dir2 import dir2, safe_hasattr
75 75 from IPython.utils.process import arg_split
76 76 from IPython.utils.py3compat import builtin_mod, string_types, PY3
77 77 from traitlets import CBool, Enum
78 78
79 79 #-----------------------------------------------------------------------------
80 80 # Globals
81 81 #-----------------------------------------------------------------------------
82 82
83 83 # Public API
84 84 __all__ = ['Completer','IPCompleter']
85 85
86 86 if sys.platform == 'win32':
87 87 PROTECTABLES = ' '
88 88 else:
89 89 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
90 90
91 91
92 92 #-----------------------------------------------------------------------------
93 93 # Main functions and classes
94 94 #-----------------------------------------------------------------------------
95 95
96 96 def has_open_quotes(s):
97 97 """Return whether a string has open quotes.
98 98
99 99 This simply checks whether the number of quote characters of either type in
100 100 the string is odd.
101 101
102 102 Returns
103 103 -------
104 104 If there is an open quote, the quote character is returned. Else, return
105 105 False.
106 106 """
107 107 # We check " first, then ', so complex cases with nested quotes will get
108 108 # the " to take precedence.
109 109 if s.count('"') % 2:
110 110 return '"'
111 111 elif s.count("'") % 2:
112 112 return "'"
113 113 else:
114 114 return False
115 115
116 116
117 117 def protect_filename(s):
118 118 """Escape a string to protect certain characters."""
119 119
120 120 return "".join([(ch in PROTECTABLES and '\\' + ch or ch)
121 121 for ch in s])
122 122
123 123 def expand_user(path):
124 124 """Expand '~'-style usernames in strings.
125 125
126 126 This is similar to :func:`os.path.expanduser`, but it computes and returns
127 127 extra information that will be useful if the input was being used in
128 128 computing completions, and you wish to return the completions with the
129 129 original '~' instead of its expanded value.
130 130
131 131 Parameters
132 132 ----------
133 133 path : str
134 134 String to be expanded. If no ~ is present, the output is the same as the
135 135 input.
136 136
137 137 Returns
138 138 -------
139 139 newpath : str
140 140 Result of ~ expansion in the input path.
141 141 tilde_expand : bool
142 142 Whether any expansion was performed or not.
143 143 tilde_val : str
144 144 The value that ~ was replaced with.
145 145 """
146 146 # Default values
147 147 tilde_expand = False
148 148 tilde_val = ''
149 149 newpath = path
150 150
151 151 if path.startswith('~'):
152 152 tilde_expand = True
153 153 rest = len(path)-1
154 154 newpath = os.path.expanduser(path)
155 155 if rest:
156 156 tilde_val = newpath[:-rest]
157 157 else:
158 158 tilde_val = newpath
159 159
160 160 return newpath, tilde_expand, tilde_val
161 161
162 162
163 163 def compress_user(path, tilde_expand, tilde_val):
164 164 """Does the opposite of expand_user, with its outputs.
165 165 """
166 166 if tilde_expand:
167 167 return path.replace(tilde_val, '~')
168 168 else:
169 169 return path
170 170
171 171
172 172
173 173 def completions_sorting_key(word):
174 174 """key for sorting completions
175 175
176 176 This does several things:
177 177
178 178 - Lowercase all completions, so they are sorted alphabetically with
179 179 upper and lower case words mingled
180 180 - Demote any completions starting with underscores to the end
181 181 - Insert any %magic and %%cellmagic completions in the alphabetical order
182 182 by their name
183 183 """
184 184 # Case insensitive sort
185 185 word = word.lower()
186 186
187 187 prio1, prio2 = 0, 0
188 188
189 189 if word.startswith('__'):
190 190 prio1 = 2
191 191 elif word.startswith('_'):
192 192 prio1 = 1
193 193
194 194 if word.startswith('%%'):
195 195 # If there's another % in there, this is something else, so leave it alone
196 196 if not "%" in word[2:]:
197 197 word = word[2:]
198 198 prio2 = 2
199 199 elif word.startswith('%'):
200 200 if not "%" in word[1:]:
201 201 word = word[1:]
202 202 prio2 = 1
203 203
204 204 return prio1, word, prio2
205 205
206 206
207 207 @undoc
208 208 class Bunch(object): pass
209 209
210 210
211 211 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
212 212 GREEDY_DELIMS = ' =\r\n'
213 213
214 214
215 215 class CompletionSplitter(object):
216 216 """An object to split an input line in a manner similar to readline.
217 217
218 218 By having our own implementation, we can expose readline-like completion in
219 219 a uniform manner to all frontends. This object only needs to be given the
220 220 line of text to be split and the cursor position on said line, and it
221 221 returns the 'word' to be completed on at the cursor after splitting the
222 222 entire line.
223 223
224 224 What characters are used as splitting delimiters can be controlled by
225 225 setting the `delims` attribute (this is a property that internally
226 226 automatically builds the necessary regular expression)"""
227 227
228 228 # Private interface
229 229
230 230 # A string of delimiter characters. The default value makes sense for
231 231 # IPython's most typical usage patterns.
232 232 _delims = DELIMS
233 233
234 234 # The expression (a normal string) to be compiled into a regular expression
235 235 # for actual splitting. We store it as an attribute mostly for ease of
236 236 # debugging, since this type of code can be so tricky to debug.
237 237 _delim_expr = None
238 238
239 239 # The regular expression that does the actual splitting
240 240 _delim_re = None
241 241
242 242 def __init__(self, delims=None):
243 243 delims = CompletionSplitter._delims if delims is None else delims
244 244 self.delims = delims
245 245
246 246 @property
247 247 def delims(self):
248 248 """Return the string of delimiter characters."""
249 249 return self._delims
250 250
251 251 @delims.setter
252 252 def delims(self, delims):
253 253 """Set the delimiters for line splitting."""
254 254 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
255 255 self._delim_re = re.compile(expr)
256 256 self._delims = delims
257 257 self._delim_expr = expr
258 258
259 259 def split_line(self, line, cursor_pos=None):
260 260 """Split a line of text with a cursor at the given position.
261 261 """
262 262 l = line if cursor_pos is None else line[:cursor_pos]
263 263 return self._delim_re.split(l)[-1]
264 264
265 265
266 266 class Completer(Configurable):
267 267
268 268 greedy = CBool(False, config=True,
269 269 help="""Activate greedy completion
270 270
271 271 This will enable completion on elements of lists, results of function calls, etc.,
272 272 but can be unsafe because the code is actually evaluated on TAB.
273 273 """
274 274 )
275 275
276 276
277 277 def __init__(self, namespace=None, global_namespace=None, **kwargs):
278 278 """Create a new completer for the command line.
279 279
280 280 Completer(namespace=ns,global_namespace=ns2) -> completer instance.
281 281
282 282 If unspecified, the default namespace where completions are performed
283 283 is __main__ (technically, __main__.__dict__). Namespaces should be
284 284 given as dictionaries.
285 285
286 286 An optional second namespace can be given. This allows the completer
287 287 to handle cases where both the local and global scopes need to be
288 288 distinguished.
289 289
290 290 Completer instances should be used as the completion mechanism of
291 291 readline via the set_completer() call:
292 292
293 293 readline.set_completer(Completer(my_namespace).complete)
294 294 """
295 295
296 296 # Don't bind to namespace quite yet, but flag whether the user wants a
297 297 # specific namespace or to use __main__.__dict__. This will allow us
298 298 # to bind to __main__.__dict__ at completion time, not now.
299 299 if namespace is None:
300 300 self.use_main_ns = 1
301 301 else:
302 302 self.use_main_ns = 0
303 303 self.namespace = namespace
304 304
305 305 # The global namespace, if given, can be bound directly
306 306 if global_namespace is None:
307 307 self.global_namespace = {}
308 308 else:
309 309 self.global_namespace = global_namespace
310 310
311 311 super(Completer, self).__init__(**kwargs)
312 312
313 313 def complete(self, text, state):
314 314 """Return the next possible completion for 'text'.
315 315
316 316 This is called successively with state == 0, 1, 2, ... until it
317 317 returns None. The completion should begin with 'text'.
318 318
319 319 """
320 320 if self.use_main_ns:
321 321 self.namespace = __main__.__dict__
322 322
323 323 if state == 0:
324 324 if "." in text:
325 325 self.matches = self.attr_matches(text)
326 326 else:
327 327 self.matches = self.global_matches(text)
328 328 try:
329 329 return self.matches[state]
330 330 except IndexError:
331 331 return None
332 332
333 333 def global_matches(self, text):
334 334 """Compute matches when text is a simple name.
335 335
336 336 Return a list of all keywords, built-in functions and names currently
337 337 defined in self.namespace or self.global_namespace that match.
338 338
339 339 """
340 340 #print 'Completer->global_matches, txt=%r' % text # dbg
341 341 matches = []
342 342 match_append = matches.append
343 343 n = len(text)
344 344 for lst in [keyword.kwlist,
345 345 builtin_mod.__dict__.keys(),
346 346 self.namespace.keys(),
347 347 self.global_namespace.keys()]:
348 348 for word in lst:
349 349 if word[:n] == text and word != "__builtins__":
350 350 match_append(word)
351 351 return matches
352 352
353 353 def attr_matches(self, text):
354 354 """Compute matches when text contains a dot.
355 355
356 356 Assuming the text is of the form NAME.NAME....[NAME], and is
357 357 evaluatable in self.namespace or self.global_namespace, it will be
358 358 evaluated and its attributes (as revealed by dir()) are used as
359 359 possible completions. (For class instances, class members are
360 360 also considered.)
361 361
362 362 WARNING: this can still invoke arbitrary C code, if an object
363 363 with a __getattr__ hook is evaluated.
364 364
365 365 """
366 366
367 367 #io.rprint('Completer->attr_matches, txt=%r' % text) # dbg
368 368 # Another option, seems to work great. Catches things like ''.<tab>
369 369 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
370 370
371 371 if m:
372 372 expr, attr = m.group(1, 3)
373 373 elif self.greedy:
374 374 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
375 375 if not m2:
376 376 return []
377 377 expr, attr = m2.group(1,2)
378 378 else:
379 379 return []
380 380
381 381 try:
382 382 obj = eval(expr, self.namespace)
383 383 except:
384 384 try:
385 385 obj = eval(expr, self.global_namespace)
386 386 except:
387 387 return []
388 388
389 389 if self.limit_to__all__ and hasattr(obj, '__all__'):
390 390 words = get__all__entries(obj)
391 391 else:
392 392 words = dir2(obj)
393 393
394 394 try:
395 395 words = generics.complete_object(obj, words)
396 396 except TryNext:
397 397 pass
398 398 except Exception:
399 399 # Silence errors from completion function
400 400 #raise # dbg
401 401 pass
402 402 # Build match list to return
403 403 n = len(attr)
404 404 res = ["%s.%s" % (expr, w) for w in words if w[:n] == attr ]
405 405 return res
406 406
407 407
408 408 def get__all__entries(obj):
409 409 """returns the strings in the __all__ attribute"""
410 410 try:
411 411 words = getattr(obj, '__all__')
412 412 except:
413 413 return []
414 414
415 415 return [w for w in words if isinstance(w, string_types)]
416 416
417 417
418 418 def match_dict_keys(keys, prefix, delims):
419 419 """Used by dict_key_matches, matching the prefix to a list of keys"""
420 420 if not prefix:
421 421 return None, 0, [repr(k) for k in keys
422 422 if isinstance(k, (string_types, bytes))]
423 423 quote_match = re.search('["\']', prefix)
424 424 quote = quote_match.group()
425 425 try:
426 426 prefix_str = eval(prefix + quote, {})
427 427 except Exception:
428 428 return None, 0, []
429 429
430 430 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
431 431 token_match = re.search(pattern, prefix, re.UNICODE)
432 432 token_start = token_match.start()
433 433 token_prefix = token_match.group()
434 434
435 435 # TODO: support bytes in Py3k
436 436 matched = []
437 437 for key in keys:
438 438 try:
439 439 if not key.startswith(prefix_str):
440 440 continue
441 441 except (AttributeError, TypeError, UnicodeError):
442 442 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
443 443 continue
444 444
445 445 # reformat remainder of key to begin with prefix
446 446 rem = key[len(prefix_str):]
447 447 # force repr wrapped in '
448 448 rem_repr = repr(rem + '"')
449 449 if rem_repr.startswith('u') and prefix[0] not in 'uU':
450 450 # Found key is unicode, but prefix is Py2 string.
451 451 # Therefore attempt to interpret key as string.
452 452 try:
453 453 rem_repr = repr(rem.encode('ascii') + '"')
454 454 except UnicodeEncodeError:
455 455 continue
456 456
457 457 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
458 458 if quote == '"':
459 459 # The entered prefix is quoted with ",
460 460 # but the match is quoted with '.
461 461 # A contained " hence needs escaping for comparison:
462 462 rem_repr = rem_repr.replace('"', '\\"')
463 463
464 464 # then reinsert prefix from start of token
465 465 matched.append('%s%s' % (token_prefix, rem_repr))
466 466 return quote, token_start, matched
467 467
468 468
469 469 def _safe_isinstance(obj, module, class_name):
470 470 """Checks if obj is an instance of module.class_name if loaded
471 471 """
472 472 return (module in sys.modules and
473 473 isinstance(obj, getattr(__import__(module), class_name)))
474 474
475 def _safe_really_hasattr(obj, name):
476 """Checks that an object genuinely has a given attribute.
477
478 Some objects claim to have any attribute that's requested, to act as a lazy
479 proxy for something else. We want to catch these cases and ignore their
480 claim to have the attribute we're interested in.
481 """
482 if safe_hasattr(obj, '_ipy_proxy_check_dont_define_this_'):
483 # If it claims this exists, don't trust it
484 return False
485
486 return safe_hasattr(obj, name)
475 487
476 488
477 489 def back_unicode_name_matches(text):
478 490 u"""Match unicode characters back to unicode name
479 491
480 492 This does ☃ -> \\snowman
481 493
482 494 Note that snowman is not a valid python3 combining character, but it will be expanded.
483 495 However, it will not recombine back into the snowman character through the completion machinery.
484 496
485 497 Nor will this back-complete standard sequences like \\n, \\b ...
486 498
487 499 Used on Python 3 only.
488 500 """
489 501 if len(text)<2:
490 502 return u'', ()
491 503 maybe_slash = text[-2]
492 504 if maybe_slash != '\\':
493 505 return u'', ()
494 506
495 507 char = text[-1]
496 508 # no expand on quote for completion in strings.
497 509 # nor backcomplete standard ascii keys
498 510 if char in string.ascii_letters or char in ['"',"'"]:
499 511 return u'', ()
500 512 try :
501 513 unic = unicodedata.name(char)
502 514 return '\\'+char,['\\'+unic]
503 515 except KeyError as e:
504 516 pass
505 517 return u'', ()
506 518
507 519 def back_latex_name_matches(text):
508 520 u"""Match latex characters back to unicode name
509 521
510 522 This does √ -> \\sqrt
511 523
512 524 Used on Python 3 only.
513 525 """
514 526 if len(text)<2:
515 527 return u'', ()
516 528 maybe_slash = text[-2]
517 529 if maybe_slash != '\\':
518 530 return u'', ()
519 531
520 532
521 533 char = text[-1]
522 534 # no expand on quote for completion in strings.
523 535 # nor backcomplete standard ascii keys
524 536 if char in string.ascii_letters or char in ['"',"'"]:
525 537 return u'', ()
526 538 try :
527 539 latex = reverse_latex_symbol[char]
528 540 # '\\' replace the \ as well
529 541 return '\\'+char,[latex]
530 542 except KeyError as e:
531 543 pass
532 544 return u'', ()
533 545
534 546
535 547 class IPCompleter(Completer):
536 548 """Extension of the completer class with IPython-specific features"""
537 549
538 550 def _greedy_changed(self, name, old, new):
539 551 """update the splitter and readline delims when greedy is changed"""
540 552 if new:
541 553 self.splitter.delims = GREEDY_DELIMS
542 554 else:
543 555 self.splitter.delims = DELIMS
544 556
545 557 if self.readline:
546 558 self.readline.set_completer_delims(self.splitter.delims)
547 559
548 560 merge_completions = CBool(True, config=True,
549 561 help="""Whether to merge completion results into a single list
550 562
551 563 If False, only the completion results from the first non-empty
552 564 completer will be returned.
553 565 """
554 566 )
555 567 omit__names = Enum((0,1,2), default_value=2, config=True,
556 568 help="""Instruct the completer to omit private method names
557 569
558 570 Specifically, when completing on ``object.<tab>``.
559 571
560 572 When 2 [default]: all names that start with '_' will be excluded.
561 573
562 574 When 1: all 'magic' names (``__foo__``) will be excluded.
563 575
564 576 When 0: nothing will be excluded.
565 577 """
566 578 )
567 579 limit_to__all__ = CBool(default_value=False, config=True,
568 580 help="""Instruct the completer to use __all__ for the completion
569 581
570 582 Specifically, when completing on ``object.<tab>``.
571 583
572 584 When True: only those names in obj.__all__ will be included.
573 585
574 586 When False [default]: the __all__ attribute is ignored
575 587 """
576 588 )
577 589
578 590 def __init__(self, shell=None, namespace=None, global_namespace=None,
579 591 use_readline=True, config=None, **kwargs):
580 592 """IPCompleter() -> completer
581 593
582 594 Return a completer object suitable for use by the readline library
583 595 via readline.set_completer().
584 596
585 597 Inputs:
586 598
587 599 - shell: a pointer to the ipython shell itself. This is needed
588 600 because this completer knows about magic functions, and those can
589 601 only be accessed via the ipython instance.
590 602
591 603 - namespace: an optional dict where completions are performed.
592 604
593 605 - global_namespace: secondary optional dict for completions, to
594 606 handle cases (such as IPython embedded inside functions) where
595 607 both Python scopes are visible.
596 608
597 609 use_readline : bool, optional
598 610 If true, use the readline library. This completer can still function
599 611 without readline, though in that case callers must provide some extra
600 612 information on each call about the current line."""
601 613
602 614 self.magic_escape = ESC_MAGIC
603 615 self.splitter = CompletionSplitter()
604 616
605 617 # Readline configuration, only used by the rlcompleter method.
606 618 if use_readline:
607 619 # We store the right version of readline so that later code
608 620 import IPython.utils.rlineimpl as readline
609 621 self.readline = readline
610 622 else:
611 623 self.readline = None
612 624
613 625 # _greedy_changed() depends on splitter and readline being defined:
614 626 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
615 627 config=config, **kwargs)
616 628
617 629 # List where completion matches will be stored
618 630 self.matches = []
619 631 self.shell = shell
620 632 # Regexp to split filenames with spaces in them
621 633 self.space_name_re = re.compile(r'([^\\] )')
622 634 # Hold a local ref. to glob.glob for speed
623 635 self.glob = glob.glob
624 636
625 637 # Determine if we are running on 'dumb' terminals, like (X)Emacs
626 638 # buffers, to avoid completion problems.
627 639 term = os.environ.get('TERM','xterm')
628 640 self.dumb_terminal = term in ['dumb','emacs']
629 641
630 642 # Special handling of backslashes needed in win32 platforms
631 643 if sys.platform == "win32":
632 644 self.clean_glob = self._clean_glob_win32
633 645 else:
634 646 self.clean_glob = self._clean_glob
635 647
636 648 #regexp to parse docstring for function signature
637 649 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
638 650 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
639 651 #use this if positional argument name is also needed
640 652 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
641 653
642 654 # All active matcher routines for completion
643 655 self.matchers = [self.python_matches,
644 656 self.file_matches,
645 657 self.magic_matches,
646 658 self.python_func_kw_matches,
647 659 self.dict_key_matches,
648 660 ]
649 661
650 662 def all_completions(self, text):
651 663 """
652 664 Wrapper around the complete method for the benefit of emacs
653 665 and pydb.
654 666 """
655 667 return self.complete(text)[1]
656 668
657 669 def _clean_glob(self,text):
658 670 return self.glob("%s*" % text)
659 671
660 672 def _clean_glob_win32(self,text):
661 673 return [f.replace("\\","/")
662 674 for f in self.glob("%s*" % text)]
663 675
664 676 def file_matches(self, text):
665 677 """Match filenames, expanding ~USER type strings.
666 678
667 679 Most of the seemingly convoluted logic in this completer is an
668 680 attempt to handle filenames with spaces in them. And yet it's not
669 681 quite perfect, because Python's readline doesn't expose all of the
670 682 GNU readline details needed for this to be done correctly.
671 683
672 684 For a filename with a space in it, the printed completions will be
673 685 only the parts after what's already been typed (instead of the
674 686 full completions, as is normally done). I don't think with the
675 687 current (as of Python 2.3) Python readline it's possible to do
676 688 better."""
677 689
678 690 #io.rprint('Completer->file_matches: <%r>' % text) # dbg
679 691
680 692 # chars that require escaping with backslash - i.e. chars
681 693 # that readline treats incorrectly as delimiters, but we
682 694 # don't want to treat as delimiters in filename matching
683 695 # when escaped with backslash
684 696 if text.startswith('!'):
685 697 text = text[1:]
686 698 text_prefix = '!'
687 699 else:
688 700 text_prefix = ''
689 701
690 702 text_until_cursor = self.text_until_cursor
691 703 # track strings with open quotes
692 704 open_quotes = has_open_quotes(text_until_cursor)
693 705
694 706 if '(' in text_until_cursor or '[' in text_until_cursor:
695 707 lsplit = text
696 708 else:
697 709 try:
698 710 # arg_split ~ shlex.split, but with unicode bugs fixed by us
699 711 lsplit = arg_split(text_until_cursor)[-1]
700 712 except ValueError:
701 713 # typically an unmatched ", or backslash without escaped char.
702 714 if open_quotes:
703 715 lsplit = text_until_cursor.split(open_quotes)[-1]
704 716 else:
705 717 return []
706 718 except IndexError:
707 719 # tab pressed on empty line
708 720 lsplit = ""
709 721
710 722 if not open_quotes and lsplit != protect_filename(lsplit):
711 723 # if protectables are found, do matching on the whole escaped name
712 724 has_protectables = True
713 725 text0,text = text,lsplit
714 726 else:
715 727 has_protectables = False
716 728 text = os.path.expanduser(text)
717 729
718 730 if text == "":
719 731 return [text_prefix + protect_filename(f) for f in self.glob("*")]
720 732
721 733 # Compute the matches from the filesystem
722 734 m0 = self.clean_glob(text.replace('\\',''))
723 735
724 736 if has_protectables:
725 737 # If we had protectables, we need to revert our changes to the
726 738 # beginning of filename so that we don't double-write the part
727 739 # of the filename we have so far
728 740 len_lsplit = len(lsplit)
729 741 matches = [text_prefix + text0 +
730 742 protect_filename(f[len_lsplit:]) for f in m0]
731 743 else:
732 744 if open_quotes:
733 745 # if we have a string with an open quote, we don't need to
734 746 # protect the names at all (and we _shouldn't_, as it
735 747 # would cause bugs when the filesystem call is made).
736 748 matches = m0
737 749 else:
738 750 matches = [text_prefix +
739 751 protect_filename(f) for f in m0]
740 752
741 753 #io.rprint('mm', matches) # dbg
742 754
743 755 # Mark directories in input list by appending '/' to their names.
744 756 matches = [x+'/' if os.path.isdir(x) else x for x in matches]
745 757 return matches
746 758
747 759 def magic_matches(self, text):
748 760 """Match magics"""
749 761 #print 'Completer->magic_matches:',text,'lb',self.text_until_cursor # dbg
750 762 # Get all shell magics now rather than statically, so magics loaded at
751 763 # runtime show up too.
752 764 lsm = self.shell.magics_manager.lsmagic()
753 765 line_magics = lsm['line']
754 766 cell_magics = lsm['cell']
755 767 pre = self.magic_escape
756 768 pre2 = pre+pre
757 769
758 770 # Completion logic:
759 771 # - user gives %%: only do cell magics
760 772 # - user gives %: do both line and cell magics
761 773 # - no prefix: do both
762 774 # In other words, line magics are skipped if the user gives %% explicitly
763 775 bare_text = text.lstrip(pre)
764 776 comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
765 777 if not text.startswith(pre2):
766 778 comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
767 779 return comp
768 780
769 781 def python_matches(self,text):
770 782 """Match attributes or global python names"""
771 783
772 784 #io.rprint('Completer->python_matches, txt=%r' % text) # dbg
773 785 if "." in text:
774 786 try:
775 787 matches = self.attr_matches(text)
776 788 if text.endswith('.') and self.omit__names:
777 789 if self.omit__names == 1:
778 790 # true if txt is _not_ a __ name, false otherwise:
779 791 no__name = (lambda txt:
780 792 re.match(r'.*\.__.*?__',txt) is None)
781 793 else:
782 794 # true if txt is _not_ a _ name, false otherwise:
783 795 no__name = (lambda txt:
784 796 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
785 797 matches = filter(no__name, matches)
786 798 except NameError:
787 799 # catches <undefined attributes>.<tab>
788 800 matches = []
789 801 else:
790 802 matches = self.global_matches(text)
791 803
792 804 return matches
793 805
794 806 def _default_arguments_from_docstring(self, doc):
795 807 """Parse the first line of docstring for call signature.
796 808
797 809 Docstring should be of the form 'min(iterable[, key=func])\n'.
798 810 It can also parse cython docstring of the form
799 811 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
800 812 """
801 813 if doc is None:
802 814 return []
803 815
804 816 #care only about the first line
805 817 line = doc.lstrip().splitlines()[0]
806 818
807 819 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
808 820 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
809 821 sig = self.docstring_sig_re.search(line)
810 822 if sig is None:
811 823 return []
812 824 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
813 825 sig = sig.groups()[0].split(',')
814 826 ret = []
815 827 for s in sig:
816 828 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
817 829 ret += self.docstring_kwd_re.findall(s)
818 830 return ret
819 831
820 832 def _default_arguments(self, obj):
821 833 """Return the list of default arguments of obj if it is callable,
822 834 or empty list otherwise."""
823 835 call_obj = obj
824 836 ret = []
825 837 if inspect.isbuiltin(obj):
826 838 pass
827 839 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
828 840 if inspect.isclass(obj):
829 841 #for cython embedsignature=True the constructor docstring
830 842 #belongs to the object itself not __init__
831 843 ret += self._default_arguments_from_docstring(
832 844 getattr(obj, '__doc__', ''))
833 845 # for classes, check for __init__,__new__
834 846 call_obj = (getattr(obj, '__init__', None) or
835 847 getattr(obj, '__new__', None))
836 848 # for all others, check if they are __call__able
837 849 elif hasattr(obj, '__call__'):
838 850 call_obj = obj.__call__
839 851 ret += self._default_arguments_from_docstring(
840 852 getattr(call_obj, '__doc__', ''))
841 853
842 854 if PY3:
843 855 _keeps = (inspect.Parameter.KEYWORD_ONLY,
844 856 inspect.Parameter.POSITIONAL_OR_KEYWORD)
845 857 signature = inspect.signature
846 858 else:
847 859 import IPython.utils.signatures
848 860 _keeps = (IPython.utils.signatures.Parameter.KEYWORD_ONLY,
849 861 IPython.utils.signatures.Parameter.POSITIONAL_OR_KEYWORD)
850 862 signature = IPython.utils.signatures.signature
851 863
852 864 try:
853 865 sig = signature(call_obj)
854 866 ret.extend(k for k, v in sig.parameters.items() if
855 867 v.kind in _keeps)
856 868 except ValueError:
857 869 pass
858 870
859 871 return list(set(ret))
860 872
861 873 def python_func_kw_matches(self,text):
862 874 """Match named parameters (kwargs) of the last open function"""
863 875
864 876 if "." in text: # a parameter cannot be dotted
865 877 return []
866 878 try: regexp = self.__funcParamsRegex
867 879 except AttributeError:
868 880 regexp = self.__funcParamsRegex = re.compile(r'''
869 881 '.*?(?<!\\)' | # single quoted strings or
870 882 ".*?(?<!\\)" | # double quoted strings or
871 883 \w+ | # identifier
872 884 \S # other characters
873 885 ''', re.VERBOSE | re.DOTALL)
874 886 # 1. find the nearest identifier that comes before an unclosed
875 887 # parenthesis before the cursor
876 888 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
877 889 tokens = regexp.findall(self.text_until_cursor)
878 890 tokens.reverse()
879 891 iterTokens = iter(tokens); openPar = 0
880 892
881 893 for token in iterTokens:
882 894 if token == ')':
883 895 openPar -= 1
884 896 elif token == '(':
885 897 openPar += 1
886 898 if openPar > 0:
887 899 # found the last unclosed parenthesis
888 900 break
889 901 else:
890 902 return []
891 903 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
892 904 ids = []
893 905 isId = re.compile(r'\w+$').match
894 906
895 907 while True:
896 908 try:
897 909 ids.append(next(iterTokens))
898 910 if not isId(ids[-1]):
899 911 ids.pop(); break
900 912 if not next(iterTokens) == '.':
901 913 break
902 914 except StopIteration:
903 915 break
904 916 # lookup the candidate callable matches either using global_matches
905 917 # or attr_matches for dotted names
906 918 if len(ids) == 1:
907 919 callableMatches = self.global_matches(ids[0])
908 920 else:
909 921 callableMatches = self.attr_matches('.'.join(ids[::-1]))
910 922 argMatches = []
911 923 for callableMatch in callableMatches:
912 924 try:
913 925 namedArgs = self._default_arguments(eval(callableMatch,
914 926 self.namespace))
915 927 except:
916 928 continue
917 929
918 930 for namedArg in namedArgs:
919 931 if namedArg.startswith(text):
920 932 argMatches.append("%s=" %namedArg)
921 933 return argMatches
922 934
923 935 def dict_key_matches(self, text):
924 936 "Match string keys in a dictionary, after e.g. 'foo[' "
925 937 def get_keys(obj):
926 # Only allow completion for known in-memory dict-like types
938 # Objects can define their own completions by defining an
939 # _ipy_key_completions_() method.
940 if _safe_really_hasattr(obj, '_ipy_key_completions_'):
941 return obj._ipy_key_completions_()
942
943 # Special case some common in-memory dict-like types
927 944 if isinstance(obj, dict) or\
928 945 _safe_isinstance(obj, 'pandas', 'DataFrame'):
929 946 try:
930 947 return list(obj.keys())
931 948 except Exception:
932 949 return []
933 950 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
934 951 _safe_isinstance(obj, 'numpy', 'void'):
935 952 return obj.dtype.names or []
936 953 return []
937 954
938 955 try:
939 956 regexps = self.__dict_key_regexps
940 957 except AttributeError:
941 958 dict_key_re_fmt = r'''(?x)
942 959 ( # match dict-referring expression wrt greedy setting
943 960 %s
944 961 )
945 962 \[ # open bracket
946 963 \s* # and optional whitespace
947 964 ([uUbB]? # string prefix (r not handled)
948 965 (?: # unclosed string
949 966 '(?:[^']|(?<!\\)\\')*
950 967 |
951 968 "(?:[^"]|(?<!\\)\\")*
952 969 )
953 970 )?
954 971 $
955 972 '''
956 973 regexps = self.__dict_key_regexps = {
957 974 False: re.compile(dict_key_re_fmt % '''
958 975 # identifiers separated by .
959 976 (?!\d)\w+
960 977 (?:\.(?!\d)\w+)*
961 978 '''),
962 979 True: re.compile(dict_key_re_fmt % '''
963 980 .+
964 981 ''')
965 982 }
966 983
967 984 match = regexps[self.greedy].search(self.text_until_cursor)
968 985 if match is None:
969 986 return []
970 987
971 988 expr, prefix = match.groups()
972 989 try:
973 990 obj = eval(expr, self.namespace)
974 991 except Exception:
975 992 try:
976 993 obj = eval(expr, self.global_namespace)
977 994 except Exception:
978 995 return []
979 996
980 997 keys = get_keys(obj)
981 998 if not keys:
982 999 return keys
983 1000 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
984 1001 if not matches:
985 1002 return matches
986 1003
987 1004 # get the cursor position of
988 1005 # - the text being completed
989 1006 # - the start of the key text
990 1007 # - the start of the completion
991 1008 text_start = len(self.text_until_cursor) - len(text)
992 1009 if prefix:
993 1010 key_start = match.start(2)
994 1011 completion_start = key_start + token_offset
995 1012 else:
996 1013 key_start = completion_start = match.end()
997 1014
998 1015 # grab the leading prefix, to make sure all completions start with `text`
999 1016 if text_start > key_start:
1000 1017 leading = ''
1001 1018 else:
1002 1019 leading = text[text_start:completion_start]
1003 1020
1004 1021 # the index of the `[` character
1005 1022 bracket_idx = match.end(1)
1006 1023
1007 1024 # append closing quote and bracket as appropriate
1008 1025 # this is *not* appropriate if the opening quote or bracket is outside
1009 1026 # the text given to this method
1010 1027 suf = ''
1011 1028 continuation = self.line_buffer[len(self.text_until_cursor):]
1012 1029 if key_start > text_start and closing_quote:
1013 1030 # quotes were opened inside text, maybe close them
1014 1031 if continuation.startswith(closing_quote):
1015 1032 continuation = continuation[len(closing_quote):]
1016 1033 else:
1017 1034 suf += closing_quote
1018 1035 if bracket_idx > text_start:
1019 1036 # brackets were opened inside text, maybe close them
1020 1037 if not continuation.startswith(']'):
1021 1038 suf += ']'
1022 1039
1023 1040 return [leading + k + suf for k in matches]
1024 1041
1025 1042 def unicode_name_matches(self, text):
1026 1043 u"""Match Latex-like syntax for unicode characters based
1027 1044 on the name of the character.
1028 1045
1029 1046 This does \\GREEK SMALL LETTER ETA -> η
1030 1047
1031 1048 Works only on valid python 3 identifiers, or on combining characters that
1032 1049 will combine to form a valid identifier.
1033 1050
1034 1051 Used on Python 3 only.
1035 1052 """
1036 1053 slashpos = text.rfind('\\')
1037 1054 if slashpos > -1:
1038 1055 s = text[slashpos+1:]
1039 1056 try :
1040 1057 unic = unicodedata.lookup(s)
1041 1058 # allow combining chars
1042 1059 if ('a'+unic).isidentifier():
1043 1060 return '\\'+s,[unic]
1044 1061 except KeyError as e:
1045 1062 pass
1046 1063 return u'', []
1047 1064
1048 1065
1049 1066
1050 1067
1051 1068 def latex_matches(self, text):
1052 1069 u"""Match Latex syntax for unicode characters.
1053 1070
1054 1071 This does both \\alp -> \\alpha and \\alpha -> α
1055 1072
1056 1073 Used on Python 3 only.
1057 1074 """
1058 1075 slashpos = text.rfind('\\')
1059 1076 if slashpos > -1:
1060 1077 s = text[slashpos:]
1061 1078 if s in latex_symbols:
1062 1079 # Try to complete a full latex symbol to unicode
1063 1080 # \\alpha -> α
1064 1081 return s, [latex_symbols[s]]
1065 1082 else:
1066 1083 # If a user has partially typed a latex symbol, give them
1067 1084 # a full list of options \al -> [\aleph, \alpha]
1068 1085 matches = [k for k in latex_symbols if k.startswith(s)]
1069 1086 return s, matches
1070 1087 return u'', []
1071 1088
1072 1089 def dispatch_custom_completer(self, text):
1073 1090 #io.rprint("Custom! '%s' %s" % (text, self.custom_completers)) # dbg
1074 1091 line = self.line_buffer
1075 1092 if not line.strip():
1076 1093 return None
1077 1094
1078 1095 # Create a little structure to pass all the relevant information about
1079 1096 # the current completion to any custom completer.
1080 1097 event = Bunch()
1081 1098 event.line = line
1082 1099 event.symbol = text
1083 1100 cmd = line.split(None,1)[0]
1084 1101 event.command = cmd
1085 1102 event.text_until_cursor = self.text_until_cursor
1086 1103
1087 1104 #print "\ncustom:{%s]\n" % event # dbg
1088 1105
1089 1106 # for foo etc, try also to find completer for %foo
1090 1107 if not cmd.startswith(self.magic_escape):
1091 1108 try_magic = self.custom_completers.s_matches(
1092 1109 self.magic_escape + cmd)
1093 1110 else:
1094 1111 try_magic = []
1095 1112
1096 1113 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1097 1114 try_magic,
1098 1115 self.custom_completers.flat_matches(self.text_until_cursor)):
1099 1116 #print "try",c # dbg
1100 1117 try:
1101 1118 res = c(event)
1102 1119 if res:
1103 1120 # first, try case sensitive match
1104 1121 withcase = [r for r in res if r.startswith(text)]
1105 1122 if withcase:
1106 1123 return withcase
1107 1124 # if none, then case insensitive ones are ok too
1108 1125 text_low = text.lower()
1109 1126 return [r for r in res if r.lower().startswith(text_low)]
1110 1127 except TryNext:
1111 1128 pass
1112 1129
1113 1130 return None
1114 1131
1115 1132 def complete(self, text=None, line_buffer=None, cursor_pos=None):
1116 1133 """Find completions for the given text and line context.
1117 1134
1118 1135 Note that both the text and the line_buffer are optional, but at least
1119 1136 one of them must be given.
1120 1137
1121 1138 Parameters
1122 1139 ----------
1123 1140 text : string, optional
1124 1141 Text to perform the completion on. If not given, the line buffer
1125 1142 is split using the instance's CompletionSplitter object.
1126 1143
1127 1144 line_buffer : string, optional
1128 1145 If not given, the completer attempts to obtain the current line
1129 1146 buffer via readline. This keyword allows clients which are
1130 1147 requesting text completions in non-readline contexts to inform
1131 1148 the completer of the entire text.
1132 1149
1133 1150 cursor_pos : int, optional
1134 1151 Index of the cursor in the full line buffer. Should be provided by
1135 1152 remote frontends where the kernel has no access to frontend state.
1136 1153
1137 1154 Returns
1138 1155 -------
1139 1156 text : str
1140 1157 Text that was actually used in the completion.
1141 1158
1142 1159 matches : list
1143 1160 A list of completion matches.
1144 1161 """
1145 1162 # io.rprint('\nCOMP1 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1146 1163
1147 1164 # if the cursor position isn't given, the only sane assumption we can
1148 1165 # make is that it's at the end of the line (the common case)
1149 1166 if cursor_pos is None:
1150 1167 cursor_pos = len(line_buffer) if text is None else len(text)
1151 1168
1152 1169 if PY3:
1153 1170
1154 1171 base_text = text if not line_buffer else line_buffer[:cursor_pos]
1155 1172 latex_text, latex_matches = self.latex_matches(base_text)
1156 1173 if latex_matches:
1157 1174 return latex_text, latex_matches
1158 1175 name_text = ''
1159 1176 name_matches = []
1160 1177 for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
1161 1178 name_text, name_matches = meth(base_text)
1162 1179 if name_text:
1163 1180 return name_text, name_matches
1164 1181
1165 1182 # if text is either None or an empty string, rely on the line buffer
1166 1183 if not text:
1167 1184 text = self.splitter.split_line(line_buffer, cursor_pos)
1168 1185
1169 1186 # If no line buffer is given, assume the input text is all there was
1170 1187 if line_buffer is None:
1171 1188 line_buffer = text
1172 1189
1173 1190 self.line_buffer = line_buffer
1174 1191 self.text_until_cursor = self.line_buffer[:cursor_pos]
1175 1192 # io.rprint('COMP2 %r %r %r' % (text, line_buffer, cursor_pos)) # dbg
1176 1193
1177 1194 # Start with a clean slate of completions
1178 1195 self.matches[:] = []
1179 1196 custom_res = self.dispatch_custom_completer(text)
1180 1197 if custom_res is not None:
1181 1198 # did custom completers produce something?
1182 1199 self.matches = custom_res
1183 1200 else:
1184 1201 # Extend the list of completions with the results of each
1185 1202 # matcher, so we return results to the user from all
1186 1203 # namespaces.
1187 1204 if self.merge_completions:
1188 1205 self.matches = []
1189 1206 for matcher in self.matchers:
1190 1207 try:
1191 1208 self.matches.extend(matcher(text))
1192 1209 except:
1193 1210 # Show the ugly traceback if the matcher causes an
1194 1211 # exception, but do NOT crash the kernel!
1195 1212 sys.excepthook(*sys.exc_info())
1196 1213 else:
1197 1214 for matcher in self.matchers:
1198 1215 self.matches = matcher(text)
1199 1216 if self.matches:
1200 1217 break
1201 1218 # FIXME: we should extend our api to return a dict with completions for
1202 1219 # different types of objects. The rlcomplete() method could then
1203 1220 # simply collapse the dict into a list for readline, but we'd have
1204 1221 # richer completion semantics in other environments.
1205 1222
1206 1223 self.matches = sorted(set(self.matches), key=completions_sorting_key)
1207 1224
1208 1225 #io.rprint('COMP TEXT, MATCHES: %r, %r' % (text, self.matches)) # dbg
1209 1226 return text, self.matches
1210 1227
1211 1228 def rlcomplete(self, text, state):
1212 1229 """Return the state-th possible completion for 'text'.
1213 1230
1214 1231 This is called successively with state == 0, 1, 2, ... until it
1215 1232 returns None. The completion should begin with 'text'.
1216 1233
1217 1234 Parameters
1218 1235 ----------
1219 1236 text : string
1220 1237 Text to perform the completion on.
1221 1238
1222 1239 state : int
1223 1240 Counter used by readline.
1224 1241 """
1225 1242 if state==0:
1226 1243
1227 1244 self.line_buffer = line_buffer = self.readline.get_line_buffer()
1228 1245 cursor_pos = self.readline.get_endidx()
1229 1246
1230 1247 #io.rprint("\nRLCOMPLETE: %r %r %r" %
1231 1248 # (text, line_buffer, cursor_pos) ) # dbg
1232 1249
1233 1250 # if there is only a tab on a line with only whitespace, instead of
1234 1251 # the mostly useless 'do you want to see all million completions'
1235 1252 # message, just do the right thing and give the user his tab!
1236 1253 # Incidentally, this enables pasting of tabbed text from an editor
1237 1254 # (as long as autoindent is off).
1238 1255
1239 1256 # It should be noted that at least pyreadline still shows file
1240 1257 # completions - is there a way around it?
1241 1258
1242 1259 # don't apply this on 'dumb' terminals, such as emacs buffers, so
1243 1260 # we don't interfere with their own tab-completion mechanism.
1244 1261 if not (self.dumb_terminal or line_buffer.strip()):
1245 1262 self.readline.insert_text('\t')
1246 1263 sys.stdout.flush()
1247 1264 return None
1248 1265
1249 1266 # Note: debugging exceptions that may occur in completion is very
1250 1267 # tricky, because readline unconditionally silences them. So if
1251 1268 # during development you suspect a bug in the completion code, turn
1252 1269 # this flag on temporarily by uncommenting the second form (don't
1253 1270 # flip the value in the first line, as the '# dbg' marker can be
1254 1271 # automatically detected and is used elsewhere).
1255 1272 DEBUG = False
1256 1273 #DEBUG = True # dbg
1257 1274 if DEBUG:
1258 1275 try:
1259 1276 self.complete(text, line_buffer, cursor_pos)
1260 1277 except:
1261 1278 import traceback; traceback.print_exc()
1262 1279 else:
1263 1280 # The normal production version is here
1264 1281
1265 1282 # This method computes the self.matches array
1266 1283 self.complete(text, line_buffer, cursor_pos)
1267 1284
1268 1285 try:
1269 1286 return self.matches[state]
1270 1287 except IndexError:
1271 1288 return None
1272 1289
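The _safe_really_hasattr helper in the hunk above exists because some objects act as lazy proxies and claim to have any attribute they are asked about; probing for a deliberately absent attribute unmasks them before the key-completion hook is trusted. A small illustrative sketch (the proxy class is hypothetical):

    class AnswerEverythingProxy(object):
        """Hypothetical proxy that pretends every attribute exists."""
        def __getattr__(self, name):
            return lambda: []

    proxy = AnswerEverythingProxy()
    # Plain hasattr() is fooled into reporting the completion hook:
    assert hasattr(proxy, '_ipy_key_completions_')
    # _safe_really_hasattr() first probes '_ipy_proxy_check_dont_define_this_';
    # because the proxy claims to have that too, its key-completion hook is not
    # trusted and dict_key_matches falls back to the default key sources.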
@@ -1,780 +1,796 b''
1 1 # encoding: utf-8
2 2 """Tests for the IPython tab-completion machinery."""
3 3
4 4 # Copyright (c) IPython Development Team.
5 5 # Distributed under the terms of the Modified BSD License.
6 6
7 7 import os
8 8 import sys
9 9 import unittest
10 10
11 11 from contextlib import contextmanager
12 12
13 13 import nose.tools as nt
14 14
15 15 from traitlets.config.loader import Config
16 16 from IPython import get_ipython
17 17 from IPython.core import completer
18 18 from IPython.external.decorators import knownfailureif
19 19 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
20 20 from IPython.utils.generics import complete_object
21 21 from IPython.utils.py3compat import string_types, unicode_type
22 22 from IPython.testing import decorators as dec
23 23
24 24 #-----------------------------------------------------------------------------
25 25 # Test functions
26 26 #-----------------------------------------------------------------------------
27 27
28 28 @contextmanager
29 29 def greedy_completion():
30 30 ip = get_ipython()
31 31 greedy_original = ip.Completer.greedy
32 32 try:
33 33 ip.Completer.greedy = True
34 34 yield
35 35 finally:
36 36 ip.Completer.greedy = greedy_original
37 37
38 38 def test_protect_filename():
39 39 pairs = [ ('abc','abc'),
40 40 (' abc',r'\ abc'),
41 41 ('a bc',r'a\ bc'),
42 42 ('a  bc',r'a\ \ bc'),
43 43 ('  bc',r'\ \ bc'),
44 44 ]
45 45 # On posix, we also protect parens and other special characters
46 46 if sys.platform != 'win32':
47 47 pairs.extend( [('a(bc',r'a\(bc'),
48 48 ('a)bc',r'a\)bc'),
49 49 ('a( )bc',r'a\(\ \)bc'),
50 50 ('a[1]bc', r'a\[1\]bc'),
51 51 ('a{1}bc', r'a\{1\}bc'),
52 52 ('a#bc', r'a\#bc'),
53 53 ('a?bc', r'a\?bc'),
54 54 ('a=bc', r'a\=bc'),
55 55 ('a\\bc', r'a\\bc'),
56 56 ('a|bc', r'a\|bc'),
57 57 ('a;bc', r'a\;bc'),
58 58 ('a:bc', r'a\:bc'),
59 59 ("a'bc", r"a\'bc"),
60 60 ('a*bc', r'a\*bc'),
61 61 ('a"bc', r'a\"bc'),
62 62 ('a^bc', r'a\^bc'),
63 63 ('a&bc', r'a\&bc'),
64 64 ] )
65 65 # run the actual tests
66 66 for s1, s2 in pairs:
67 67 s1p = completer.protect_filename(s1)
68 68 nt.assert_equal(s1p, s2)
69 69
70 70
71 71 def check_line_split(splitter, test_specs):
72 72 for part1, part2, split in test_specs:
73 73 cursor_pos = len(part1)
74 74 line = part1+part2
75 75 out = splitter.split_line(line, cursor_pos)
76 76 nt.assert_equal(out, split)
77 77
78 78
79 79 def test_line_split():
80 80 """Basic line splitter test with default specs."""
81 81 sp = completer.CompletionSplitter()
82 82 # The format of the test specs is: part1, part2, expected answer. Parts 1
83 83 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
84 84 # was at the end of part1. So an empty part2 represents someone hitting
85 85 # tab at the end of the line, the most common case.
86 86 t = [('run some/scrip', '', 'some/scrip'),
87 87 ('run scripts/er', 'ror.py foo', 'scripts/er'),
88 88 ('echo $HOM', '', 'HOM'),
89 89 ('print sys.pa', '', 'sys.pa'),
90 90 ('print(sys.pa', '', 'sys.pa'),
91 91 ("execfile('scripts/er", '', 'scripts/er'),
92 92 ('a[x.', '', 'x.'),
93 93 ('a[x.', 'y', 'x.'),
94 94 ('cd "some_file/', '', 'some_file/'),
95 95 ]
96 96 check_line_split(sp, t)
97 97 # Ensure splitting works OK with unicode by re-running the tests with
98 98 # all inputs turned into unicode
99 99 check_line_split(sp, [ map(unicode_type, p) for p in t] )
100 100
101 101
102 102 def test_custom_completion_error():
103 103 """Test that errors from custom attribute completers are silenced."""
104 104 ip = get_ipython()
105 105 class A(object): pass
106 106 ip.user_ns['a'] = A()
107 107
108 108 @complete_object.when_type(A)
109 109 def complete_A(a, existing_completions):
110 110 raise TypeError("this should be silenced")
111 111
112 112 ip.complete("a.")
113 113
114 114
115 115 def test_unicode_completions():
116 116 ip = get_ipython()
117 117 # Some strings that trigger different types of completion. Check them both
118 118 # in str and unicode forms
119 119 s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
120 120 for t in s + list(map(unicode_type, s)):
121 121 # We don't need to check exact completion values (they may change
122 122 # depending on the state of the namespace), but at least no exceptions
123 123 # should be thrown and the return value should be a pair of text, list
124 124 # values.
125 125 text, matches = ip.complete(t)
126 126 nt.assert_true(isinstance(text, string_types))
127 127 nt.assert_true(isinstance(matches, list))
128 128
129 129 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
130 130 def test_latex_completions():
131 131 from IPython.core.latex_symbols import latex_symbols
132 132 import random
133 133 ip = get_ipython()
134 134 # Test some random unicode symbols
135 135 keys = random.sample(latex_symbols.keys(), 10)
136 136 for k in keys:
137 137 text, matches = ip.complete(k)
138 138 nt.assert_equal(len(matches),1)
139 139 nt.assert_equal(text, k)
140 140 nt.assert_equal(matches[0], latex_symbols[k])
141 141 # Test a more complex line
142 142 text, matches = ip.complete(u'print(\\alpha')
143 143 nt.assert_equals(text, u'\\alpha')
144 144 nt.assert_equals(matches[0], latex_symbols['\\alpha'])
145 145 # Test multiple matching latex symbols
146 146 text, matches = ip.complete(u'\\al')
147 147 nt.assert_in('\\alpha', matches)
148 148 nt.assert_in('\\aleph', matches)
149 149
150 150
151 151
152 152
153 153 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
154 154 def test_back_latex_completion():
155 155 ip = get_ipython()
156 156
157 157 # do not return more than 1 match for \beta, only the latex one.
158 158 name, matches = ip.complete('\\β')
159 159 nt.assert_equal(len(matches), 1)
160 160 nt.assert_equal(matches[0], '\\beta')
161 161
162 162 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
163 163 def test_back_unicode_completion():
164 164 ip = get_ipython()
165 165
166 166 name, matches = ip.complete('\\Ⅴ')
167 167 nt.assert_equal(len(matches), 1)
168 168 nt.assert_equal(matches[0], '\\ROMAN NUMERAL FIVE')
169 169
170 170
171 171 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
172 172 def test_forward_unicode_completion():
173 173 ip = get_ipython()
174 174
175 175 name, matches = ip.complete('\\ROMAN NUMERAL FIVE')
176 176 nt.assert_equal(len(matches), 1)
177 177 nt.assert_equal(matches[0], 'Ⅴ')
178 178
179 179 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies on Python 3')
180 180 def test_no_ascii_back_completion():
181 181 ip = get_ipython()
182 182 with TemporaryWorkingDirectory(): # Avoid any filename completions
183 183 # single ascii letters that don't yet have completions
184 184 for letter in 'fjqyJMQVWY' :
185 185 name, matches = ip.complete('\\'+letter)
186 186 nt.assert_equal(matches, [])
187 187
188 188
189 189
190 190
191 191 class CompletionSplitterTestCase(unittest.TestCase):
192 192 def setUp(self):
193 193 self.sp = completer.CompletionSplitter()
194 194
195 195 def test_delim_setting(self):
196 196 self.sp.delims = ' '
197 197 nt.assert_equal(self.sp.delims, ' ')
198 198 nt.assert_equal(self.sp._delim_expr, '[\ ]')
199 199
200 200 def test_spaces(self):
201 201 """Test with only spaces as split chars."""
202 202 self.sp.delims = ' '
203 203 t = [('foo', '', 'foo'),
204 204 ('run foo', '', 'foo'),
205 205 ('run foo', 'bar', 'foo'),
206 206 ]
207 207 check_line_split(self.sp, t)
208 208
209 209
210 210 def test_has_open_quotes1():
211 211 for s in ["'", "'''", "'hi' '"]:
212 212 nt.assert_equal(completer.has_open_quotes(s), "'")
213 213
214 214
215 215 def test_has_open_quotes2():
216 216 for s in ['"', '"""', '"hi" "']:
217 217 nt.assert_equal(completer.has_open_quotes(s), '"')
218 218
219 219
220 220 def test_has_open_quotes3():
221 221 for s in ["''", "''' '''", "'hi' 'ipython'"]:
222 222 nt.assert_false(completer.has_open_quotes(s))
223 223
224 224
225 225 def test_has_open_quotes4():
226 226 for s in ['""', '""" """', '"hi" "ipython"']:
227 227 nt.assert_false(completer.has_open_quotes(s))
228 228
229 229
230 230 @knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
231 231 def test_abspath_file_completions():
232 232 ip = get_ipython()
233 233 with TemporaryDirectory() as tmpdir:
234 234 prefix = os.path.join(tmpdir, 'foo')
235 235 suffixes = ['1', '2']
236 236 names = [prefix+s for s in suffixes]
237 237 for n in names:
238 238 open(n, 'w').close()
239 239
240 240 # Check simple completion
241 241 c = ip.complete(prefix)[1]
242 242 nt.assert_equal(c, names)
243 243
244 244 # Now check with a function call
245 245 cmd = 'a = f("%s' % prefix
246 246 c = ip.complete(prefix, cmd)[1]
247 247 comp = [prefix+s for s in suffixes]
248 248 nt.assert_equal(c, comp)
249 249
250 250
251 251 def test_local_file_completions():
252 252 ip = get_ipython()
253 253 with TemporaryWorkingDirectory():
254 254 prefix = './foo'
255 255 suffixes = ['1', '2']
256 256 names = [prefix+s for s in suffixes]
257 257 for n in names:
258 258 open(n, 'w').close()
259 259
260 260 # Check simple completion
261 261 c = ip.complete(prefix)[1]
262 262 nt.assert_equal(c, names)
263 263
264 264 # Now check with a function call
265 265 cmd = 'a = f("%s' % prefix
266 266 c = ip.complete(prefix, cmd)[1]
267 267 comp = [prefix+s for s in suffixes]
268 268 nt.assert_equal(c, comp)
269 269
270 270
271 271 def test_greedy_completions():
272 272 ip = get_ipython()
273 273 ip.ex('a=list(range(5))')
274 274 _,c = ip.complete('.',line='a[0].')
275 275 nt.assert_false('a[0].real' in c,
276 276 "Shouldn't have completed on a[0]: %s"%c)
277 277 with greedy_completion():
278 278 _,c = ip.complete('.',line='a[0].')
279 279 nt.assert_true('a[0].real' in c, "Should have completed on a[0]: %s"%c)
280 280
281 281
282 282 def test_omit__names():
283 283 # also happens to test IPCompleter as a configurable
284 284 ip = get_ipython()
285 285 ip._hidden_attr = 1
286 286 ip._x = {}
287 287 c = ip.Completer
288 288 ip.ex('ip=get_ipython()')
289 289 cfg = Config()
290 290 cfg.IPCompleter.omit__names = 0
291 291 c.update_config(cfg)
292 292 s,matches = c.complete('ip.')
293 293 nt.assert_in('ip.__str__', matches)
294 294 nt.assert_in('ip._hidden_attr', matches)
295 295 cfg = Config()
296 296 cfg.IPCompleter.omit__names = 1
297 297 c.update_config(cfg)
298 298 s,matches = c.complete('ip.')
299 299 nt.assert_not_in('ip.__str__', matches)
300 300 nt.assert_in('ip._hidden_attr', matches)
301 301 cfg = Config()
302 302 cfg.IPCompleter.omit__names = 2
303 303 c.update_config(cfg)
304 304 s,matches = c.complete('ip.')
305 305 nt.assert_not_in('ip.__str__', matches)
306 306 nt.assert_not_in('ip._hidden_attr', matches)
307 307 s,matches = c.complete('ip._x.')
308 308 nt.assert_in('ip._x.keys', matches)
309 309 del ip._hidden_attr
310 310
311 311
312 312 def test_limit_to__all__False_ok():
313 313 ip = get_ipython()
314 314 c = ip.Completer
315 315 ip.ex('class D: x=24')
316 316 ip.ex('d=D()')
317 317 cfg = Config()
318 318 cfg.IPCompleter.limit_to__all__ = False
319 319 c.update_config(cfg)
320 320 s, matches = c.complete('d.')
321 321 nt.assert_in('d.x', matches)
322 322
323 323
324 324 def test_limit_to__all__True_ok():
325 325 ip = get_ipython()
326 326 c = ip.Completer
327 327 ip.ex('class D: x=24')
328 328 ip.ex('d=D()')
329 329 ip.ex("d.__all__=['z']")
330 330 cfg = Config()
331 331 cfg.IPCompleter.limit_to__all__ = True
332 332 c.update_config(cfg)
333 333 s, matches = c.complete('d.')
334 334 nt.assert_in('d.z', matches)
335 335 nt.assert_not_in('d.x', matches)
336 336
337 337
338 338 def test_get__all__entries_ok():
339 339 class A(object):
340 340 __all__ = ['x', 1]
341 341 words = completer.get__all__entries(A())
342 342 nt.assert_equal(words, ['x'])
343 343
344 344
345 345 def test_get__all__entries_no__all__ok():
346 346 class A(object):
347 347 pass
348 348 words = completer.get__all__entries(A())
349 349 nt.assert_equal(words, [])
350 350
351 351
352 352 def test_func_kw_completions():
353 353 ip = get_ipython()
354 354 c = ip.Completer
355 355 ip.ex('def myfunc(a=1,b=2): return a+b')
356 356 s, matches = c.complete(None, 'myfunc(1,b')
357 357 nt.assert_in('b=', matches)
358 358 # Simulate completing with cursor right after b (pos==10):
359 359 s, matches = c.complete(None, 'myfunc(1,b)', 10)
360 360 nt.assert_in('b=', matches)
361 361 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
362 362 nt.assert_in('b=', matches)
363 363     # builtin function
364 364 s, matches = c.complete(None, 'min(k, k')
365 365 nt.assert_in('key=', matches)
366 366
367 367
368 368 def test_default_arguments_from_docstring():
369 369 doc = min.__doc__
370 370 ip = get_ipython()
371 371 c = ip.Completer
372 372 kwd = c._default_arguments_from_docstring(
373 373 'min(iterable[, key=func]) -> value')
374 374 nt.assert_equal(kwd, ['key'])
375 375     # with cython type annotations, etc.
376 376 kwd = c._default_arguments_from_docstring(
377 377 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
378 378 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
379 379     # leading whitespace and newlines
380 380 kwd = c._default_arguments_from_docstring(
381 381 '\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
382 382 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
383 383
384 384 def test_line_magics():
385 385 ip = get_ipython()
386 386 c = ip.Completer
387 387 s, matches = c.complete(None, 'lsmag')
388 388 nt.assert_in('%lsmagic', matches)
389 389 s, matches = c.complete(None, '%lsmag')
390 390 nt.assert_in('%lsmagic', matches)
391 391
392 392
393 393 def test_cell_magics():
394 394 from IPython.core.magic import register_cell_magic
395 395
396 396 @register_cell_magic
397 397 def _foo_cellm(line, cell):
398 398 pass
399 399
400 400 ip = get_ipython()
401 401 c = ip.Completer
402 402
403 403 s, matches = c.complete(None, '_foo_ce')
404 404 nt.assert_in('%%_foo_cellm', matches)
405 405 s, matches = c.complete(None, '%%_foo_ce')
406 406 nt.assert_in('%%_foo_cellm', matches)
407 407
408 408
409 409 def test_line_cell_magics():
410 410 from IPython.core.magic import register_line_cell_magic
411 411
412 412 @register_line_cell_magic
413 413 def _bar_cellm(line, cell):
414 414 pass
415 415
416 416 ip = get_ipython()
417 417 c = ip.Completer
418 418
419 419 # The policy here is trickier, see comments in completion code. The
420 420 # returned values depend on whether the user passes %% or not explicitly,
421 421 # and this will show a difference if the same name is both a line and cell
422 422 # magic.
423 423 s, matches = c.complete(None, '_bar_ce')
424 424 nt.assert_in('%_bar_cellm', matches)
425 425 nt.assert_in('%%_bar_cellm', matches)
426 426 s, matches = c.complete(None, '%_bar_ce')
427 427 nt.assert_in('%_bar_cellm', matches)
428 428 nt.assert_in('%%_bar_cellm', matches)
429 429 s, matches = c.complete(None, '%%_bar_ce')
430 430 nt.assert_not_in('%_bar_cellm', matches)
431 431 nt.assert_in('%%_bar_cellm', matches)
432 432
433 433
434 434 def test_magic_completion_order():
435 435
436 436 ip = get_ipython()
437 437 c = ip.Completer
438 438
439 439 # Test ordering of magics and non-magics with the same name
440 440 # We want the non-magic first
441 441
442 442     # Before the name 'matplotlib' exists in the user namespace, there should be only one option:
443 443
444 444 text, matches = c.complete('mat')
445 445 nt.assert_equal(matches, ["%matplotlib"])
446 446
447 447
448 448 ip.run_cell("matplotlib = 1") # introduce name into namespace
449 449
450 450     # After the name is defined, there should be two options, ordered like this:
451 451 text, matches = c.complete('mat')
452 452 nt.assert_equal(matches, ["matplotlib", "%matplotlib"])
453 453
454 454
455 455 ip.run_cell("timeit = 1") # define a user variable called 'timeit'
456 456
457 457 # Order of user variable and line and cell magics with same name:
458 458 text, matches = c.complete('timeit')
459 459 nt.assert_equal(matches, ["timeit", "%timeit","%%timeit"])
460 460
461 461
462 462 def test_dict_key_completion_string():
463 463 """Test dictionary key completion for string keys"""
464 464 ip = get_ipython()
465 465 complete = ip.Completer.complete
466 466
467 467 ip.user_ns['d'] = {'abc': None}
468 468
469 469 # check completion at different stages
470 470 _, matches = complete(line_buffer="d[")
471 471 nt.assert_in("'abc'", matches)
472 472 nt.assert_not_in("'abc']", matches)
473 473
474 474 _, matches = complete(line_buffer="d['")
475 475 nt.assert_in("abc", matches)
476 476 nt.assert_not_in("abc']", matches)
477 477
478 478 _, matches = complete(line_buffer="d['a")
479 479 nt.assert_in("abc", matches)
480 480 nt.assert_not_in("abc']", matches)
481 481
482 482 # check use of different quoting
483 483 _, matches = complete(line_buffer="d[\"")
484 484 nt.assert_in("abc", matches)
485 485 nt.assert_not_in('abc\"]', matches)
486 486
487 487 _, matches = complete(line_buffer="d[\"a")
488 488 nt.assert_in("abc", matches)
489 489 nt.assert_not_in('abc\"]', matches)
490 490
491 491 # check sensitivity to following context
492 492 _, matches = complete(line_buffer="d[]", cursor_pos=2)
493 493 nt.assert_in("'abc'", matches)
494 494
495 495 _, matches = complete(line_buffer="d['']", cursor_pos=3)
496 496 nt.assert_in("abc", matches)
497 497 nt.assert_not_in("abc'", matches)
498 498 nt.assert_not_in("abc']", matches)
499 499
500 500     # check that multiple matching keys are returned and that non-matching ones are not
501 501 ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,
502 502 5: None}
503 503
504 504 _, matches = complete(line_buffer="d['a")
505 505 nt.assert_in("abc", matches)
506 506 nt.assert_in("abd", matches)
507 507 nt.assert_not_in("bad", matches)
508 508 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
509 509
510 510 # check escaping and whitespace
511 511 ip.user_ns['d'] = {'a\nb': None, 'a\'b': None, 'a"b': None, 'a word': None}
512 512 _, matches = complete(line_buffer="d['a")
513 513 nt.assert_in("a\\nb", matches)
514 514 nt.assert_in("a\\'b", matches)
515 515 nt.assert_in("a\"b", matches)
516 516 nt.assert_in("a word", matches)
517 517 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
518 518
519 519 # - can complete on non-initial word of the string
520 520 _, matches = complete(line_buffer="d['a w")
521 521 nt.assert_in("word", matches)
522 522
523 523 # - understands quote escaping
524 524 _, matches = complete(line_buffer="d['a\\'")
525 525 nt.assert_in("b", matches)
526 526
527 527 # - default quoting should work like repr
528 528 _, matches = complete(line_buffer="d[")
529 529 nt.assert_in("\"a'b\"", matches)
530 530
531 531 # - when opening quote with ", possible to match with unescaped apostrophe
532 532 _, matches = complete(line_buffer="d[\"a'")
533 533 nt.assert_in("b", matches)
534 534
535 535     # must not split at delimiters that readline itself won't split at
536 536 if '-' not in ip.Completer.splitter.delims:
537 537 ip.user_ns['d'] = {'before-after': None}
538 538 _, matches = complete(line_buffer="d['before-af")
539 539 nt.assert_in('before-after', matches)
540 540
541 541 def test_dict_key_completion_contexts():
542 542 """Test expression contexts in which dict key completion occurs"""
543 543 ip = get_ipython()
544 544 complete = ip.Completer.complete
545 545 d = {'abc': None}
546 546 ip.user_ns['d'] = d
547 547
548 548 class C:
549 549 data = d
550 550 ip.user_ns['C'] = C
551 551 ip.user_ns['get'] = lambda: d
552 552
553 553 def assert_no_completion(**kwargs):
554 554 _, matches = complete(**kwargs)
555 555 nt.assert_not_in('abc', matches)
556 556 nt.assert_not_in('abc\'', matches)
557 557 nt.assert_not_in('abc\']', matches)
558 558 nt.assert_not_in('\'abc\'', matches)
559 559 nt.assert_not_in('\'abc\']', matches)
560 560
561 561 def assert_completion(**kwargs):
562 562 _, matches = complete(**kwargs)
563 563 nt.assert_in("'abc'", matches)
564 564 nt.assert_not_in("'abc']", matches)
565 565
566 566 # no completion after string closed, even if reopened
567 567 assert_no_completion(line_buffer="d['a'")
568 568 assert_no_completion(line_buffer="d[\"a\"")
569 569 assert_no_completion(line_buffer="d['a' + ")
570 570 assert_no_completion(line_buffer="d['a' + '")
571 571
572 572 # completion in non-trivial expressions
573 573 assert_completion(line_buffer="+ d[")
574 574 assert_completion(line_buffer="(d[")
575 575 assert_completion(line_buffer="C.data[")
576 576
577 577 # greedy flag
578 578 def assert_completion(**kwargs):
579 579 _, matches = complete(**kwargs)
580 580 nt.assert_in("get()['abc']", matches)
581 581
582 582 assert_no_completion(line_buffer="get()[")
583 583 with greedy_completion():
584 584 assert_completion(line_buffer="get()[")
585 585 assert_completion(line_buffer="get()['")
586 586 assert_completion(line_buffer="get()['a")
587 587 assert_completion(line_buffer="get()['ab")
588 588 assert_completion(line_buffer="get()['abc")
589 589
590 590
591 591
592 592 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
593 593 def test_dict_key_completion_bytes():
594 594 """Test handling of bytes in dict key completion"""
595 595 ip = get_ipython()
596 596 complete = ip.Completer.complete
597 597
598 598 ip.user_ns['d'] = {'abc': None, b'abd': None}
599 599
600 600 _, matches = complete(line_buffer="d[")
601 601 nt.assert_in("'abc'", matches)
602 602 nt.assert_in("b'abd'", matches)
603 603
604 604 if False: # not currently implemented
605 605 _, matches = complete(line_buffer="d[b")
606 606 nt.assert_in("b'abd'", matches)
607 607 nt.assert_not_in("b'abc'", matches)
608 608
609 609 _, matches = complete(line_buffer="d[b'")
610 610 nt.assert_in("abd", matches)
611 611 nt.assert_not_in("abc", matches)
612 612
613 613 _, matches = complete(line_buffer="d[B'")
614 614 nt.assert_in("abd", matches)
615 615 nt.assert_not_in("abc", matches)
616 616
617 617 _, matches = complete(line_buffer="d['")
618 618 nt.assert_in("abc", matches)
619 619 nt.assert_not_in("abd", matches)
620 620
621 621
622 622 @dec.onlyif(sys.version_info[0] < 3, 'This test only applies in Py<3')
623 623 def test_dict_key_completion_unicode_py2():
624 624 """Test handling of unicode in dict key completion"""
625 625 ip = get_ipython()
626 626 complete = ip.Completer.complete
627 627
628 628 ip.user_ns['d'] = {u'abc': None,
629 629 u'a\u05d0b': None}
630 630
631 631 _, matches = complete(line_buffer="d[")
632 632 nt.assert_in("u'abc'", matches)
633 633 nt.assert_in("u'a\\u05d0b'", matches)
634 634
635 635 _, matches = complete(line_buffer="d['a")
636 636 nt.assert_in("abc", matches)
637 637 nt.assert_not_in("a\\u05d0b", matches)
638 638
639 639 _, matches = complete(line_buffer="d[u'a")
640 640 nt.assert_in("abc", matches)
641 641 nt.assert_in("a\\u05d0b", matches)
642 642
643 643 _, matches = complete(line_buffer="d[U'a")
644 644 nt.assert_in("abc", matches)
645 645 nt.assert_in("a\\u05d0b", matches)
646 646
647 647 # query using escape
648 648 _, matches = complete(line_buffer=u"d[u'a\\u05d0")
649 649 nt.assert_in("u05d0b", matches) # tokenized after \\
650 650
651 651 # query using character
652 652 _, matches = complete(line_buffer=u"d[u'a\u05d0")
653 653 nt.assert_in(u"a\u05d0b", matches)
654 654
655 655 with greedy_completion():
656 656 _, matches = complete(line_buffer="d[")
657 657 nt.assert_in("d[u'abc']", matches)
658 658 nt.assert_in("d[u'a\\u05d0b']", matches)
659 659
660 660 _, matches = complete(line_buffer="d['a")
661 661 nt.assert_in("d['abc']", matches)
662 662 nt.assert_not_in("d[u'a\\u05d0b']", matches)
663 663
664 664 _, matches = complete(line_buffer="d[u'a")
665 665 nt.assert_in("d[u'abc']", matches)
666 666 nt.assert_in("d[u'a\\u05d0b']", matches)
667 667
668 668 _, matches = complete(line_buffer="d[U'a")
669 669 nt.assert_in("d[U'abc']", matches)
670 670 nt.assert_in("d[U'a\\u05d0b']", matches)
671 671
672 672 # query using escape
673 673 _, matches = complete(line_buffer=u"d[u'a\\u05d0")
674 674 nt.assert_in("d[u'a\\u05d0b']", matches) # tokenized after \\
675 675
676 676 # query using character
677 677 _, matches = complete(line_buffer=u"d[u'a\u05d0")
678 678 nt.assert_in(u"d[u'a\u05d0b']", matches)
679 679
680 680
681 681 @dec.onlyif(sys.version_info[0] >= 3, 'This test only applies in Py>=3')
682 682 def test_dict_key_completion_unicode_py3():
683 683 """Test handling of unicode in dict key completion"""
684 684 ip = get_ipython()
685 685 complete = ip.Completer.complete
686 686
687 687 ip.user_ns['d'] = {u'a\u05d0': None}
688 688
689 689 # query using escape
690 690 _, matches = complete(line_buffer="d['a\\u05d0")
691 691 nt.assert_in("u05d0", matches) # tokenized after \\
692 692
693 693 # query using character
694 694 _, matches = complete(line_buffer="d['a\u05d0")
695 695 nt.assert_in(u"a\u05d0", matches)
696 696
697 697 with greedy_completion():
698 698 # query using escape
699 699 _, matches = complete(line_buffer="d['a\\u05d0")
700 700 nt.assert_in("d['a\\u05d0']", matches) # tokenized after \\
701 701
702 702 # query using character
703 703 _, matches = complete(line_buffer="d['a\u05d0")
704 704 nt.assert_in(u"d['a\u05d0']", matches)
705 705
706 706
707 707
708 708 @dec.skip_without('numpy')
709 709 def test_struct_array_key_completion():
710 710 """Test dict key completion applies to numpy struct arrays"""
711 711 import numpy
712 712 ip = get_ipython()
713 713 complete = ip.Completer.complete
714 714 ip.user_ns['d'] = numpy.array([], dtype=[('hello', 'f'), ('world', 'f')])
715 715 _, matches = complete(line_buffer="d['")
716 716 nt.assert_in("hello", matches)
717 717 nt.assert_in("world", matches)
718 718 # complete on the numpy struct itself
719 719 dt = numpy.dtype([('my_head', [('my_dt', '>u4'), ('my_df', '>u4')]),
720 720 ('my_data', '>f4', 5)])
721 721 x = numpy.zeros(2, dtype=dt)
722 722 ip.user_ns['d'] = x[1]
723 723 _, matches = complete(line_buffer="d['")
724 724 nt.assert_in("my_head", matches)
725 725 nt.assert_in("my_data", matches)
726 726 # complete on a nested level
727 727 with greedy_completion():
728 728 ip.user_ns['d'] = numpy.zeros(2, dtype=dt)
729 729 _, matches = complete(line_buffer="d[1]['my_head']['")
730 730 nt.assert_true(any(["my_dt" in m for m in matches]))
731 731 nt.assert_true(any(["my_df" in m for m in matches]))
732 732
733 733
734 734 @dec.skip_without('pandas')
735 735 def test_dataframe_key_completion():
736 736 """Test dict key completion applies to pandas DataFrames"""
737 737 import pandas
738 738 ip = get_ipython()
739 739 complete = ip.Completer.complete
740 740 ip.user_ns['d'] = pandas.DataFrame({'hello': [1], 'world': [2]})
741 741 _, matches = complete(line_buffer="d['")
742 742 nt.assert_in("hello", matches)
743 743 nt.assert_in("world", matches)
744 744
745 745
746 746 def test_dict_key_completion_invalids():
747 747     """Smoke test for cases dict key completion can't handle"""
748 748 ip = get_ipython()
749 749 complete = ip.Completer.complete
750 750
751 751 ip.user_ns['no_getitem'] = None
752 752 ip.user_ns['no_keys'] = []
753 753 ip.user_ns['cant_call_keys'] = dict
754 754 ip.user_ns['empty'] = {}
755 755 ip.user_ns['d'] = {'abc': 5}
756 756
757 757 _, matches = complete(line_buffer="no_getitem['")
758 758 _, matches = complete(line_buffer="no_keys['")
759 759 _, matches = complete(line_buffer="cant_call_keys['")
760 760 _, matches = complete(line_buffer="empty['")
761 761 _, matches = complete(line_buffer="name_error['")
762 762 _, matches = complete(line_buffer="d['\\") # incomplete escape
763 763
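# The class below supplies its own key completions via the _ipy_key_completions_
# hook, which lets arbitrary objects participate in dict-style key completion.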
764 class KeyCompletable(object):
765 def __init__(self, things=()):
766 self.things = things
767
768 def _ipy_key_completions_(self):
769 return list(self.things)
770
771 def test_object_key_completion():
772 ip = get_ipython()
773 ip.user_ns['key_completable'] = KeyCompletable(['qwerty', 'qwick'])
774
775 _, matches = ip.Completer.complete(line_buffer="key_completable['qw")
776 nt.assert_in('qwerty', matches)
777 nt.assert_in('qwick', matches)
778
779
764 780 def test_aimport_module_completer():
765 781 ip = get_ipython()
766 782 _, matches = ip.complete('i', '%aimport i')
767 783 nt.assert_in('io', matches)
768 784 nt.assert_not_in('int', matches)
769 785
770 786 def test_import_module_completer():
771 787 ip = get_ipython()
772 788 _, matches = ip.complete('i', 'import i')
773 789 nt.assert_in('io', matches)
774 790 nt.assert_not_in('int', matches)
775 791
776 792 def test_from_module_completer():
777 793 ip = get_ipython()
778 794 _, matches = ip.complete('B', 'from io import B')
779 795 nt.assert_in('BytesIO', matches)
780 796 nt.assert_not_in('BaseException', matches)