Add completion for %colors magic
Sang Min Park
@@ -1,1933 +1,1953 b''
1 1 # encoding: utf-8
2 2 """Completion for IPython.
3 3
4 4 This module started as a fork of the rlcompleter module in the Python standard
5 5 library. The original enhancements made to rlcompleter have been sent
6 6 upstream and were accepted as of Python 2.3.
7 7
8 8 This module now supports a wide variety of completion mechanisms, both for
9 9 normal classic Python code and for IPython-specific syntax such as
10 10 magics.
11 11
12 12 Latex and Unicode completion
13 13 ============================
14 14
15 15 IPython and compatible frontends can not only complete your code, but can help
16 16 you input a wide range of characters. In particular, we allow you to insert
17 17 a unicode character using the tab completion mechanism.
18 18
19 19 Forward latex/unicode completion
20 20 --------------------------------
21 21
22 22 Forward completion allows you to easily type a unicode character using its latex
23 23 name, or unicode long description. To do so, type a backslash followed by the
24 24 relevant name and press tab:
25 25
26 26
27 27 Using latex completion:
28 28
29 29 .. code::
30 30
31 31 \\alpha<tab>
32 32 Ξ±
33 33
34 34 or using unicode completion:
35 35
36 36
37 37 .. code::
38 38
39 39 \\greek small letter alpha<tab>
40 40 Ξ±
41 41
42 42
43 43 Only valid Python identifiers will complete. Combining characters (like arrows or
44 44 dots) are also available; unlike latex, they need to be put after their
45 45 counterpart, that is to say, `F\\\\vec<tab>` is correct, not `\\\\vec<tab>F`.
46 46
47 47 Some browsers are known to display combining characters incorrectly.
48 48
49 49 Backward latex completion
50 50 -------------------------
51 51
52 52 It is sometimes challenging to know how to type a character. If you are using
53 53 IPython or any compatible frontend, you can prepend a backslash to the character
54 54 and press `<tab>` to expand it to its latex form.
55 55
56 56 .. code::
57 57
58 58 \\Ξ±<tab>
59 59 \\alpha
60 60
61 61
62 62 Both forward and backward completions can be deactivated by setting the
63 63 ``Completer.backslash_combining_completions`` option to ``False``.
64 64
65 65
66 66 Experimental
67 67 ============
68 68
69 69 Starting with IPython 6.0, this module can make use of the Jedi library to
70 70 generate completions both using static analysis of the code, and dynamically
71 71 inspecting multiple namespaces. The APIs attached to this new mechanism are
72 72 unstable and will raise unless used in a :any:`provisionalcompleter` context
73 73 manager.
74 74
75 75 You will find that the following are experimental:
76 76
77 77 - :any:`provisionalcompleter`
78 78 - :any:`IPCompleter.completions`
79 79 - :any:`Completion`
80 80 - :any:`rectify_completions`
81 81
82 82 .. note::
83 83
84 84 better name for :any:`rectify_completions` ?
85 85
86 86 We welcome any feedback on these new APIs, and we also encourage you to try this
87 87 module in debug mode (start IPython with ``--Completer.debug=True``) in order
88 88 to have extra logging information if :any:`jedi` is crashing, or if the current
89 89 IPython completer pending deprecations are returning results not yet handled
90 90 by :any:`jedi`.
91 91
92 92 Using Jedi for tab completion allows snippets like the following to work without
93 93 having to execute any code:
94 94
95 95 >>> myvar = ['hello', 42]
96 96 ... myvar[1].bi<tab>
97 97
98 98 Tab completion will be able to infer that ``myvar[1]`` is a real number without
99 99 executing any code, unlike the previously available ``IPCompleter.greedy``
100 100 option.
101 101
102 102 Be sure to update :any:`jedi` to the latest stable version or to try the
103 103 current development version to get better completions.
104 104 """
105 105
106 106 # skip module doctests
107 107 skip_doctest = True
108 108
109 109 # Copyright (c) IPython Development Team.
110 110 # Distributed under the terms of the Modified BSD License.
111 111 #
112 112 # Some of this code originated from rlcompleter in the Python standard library
113 113 # Copyright (C) 2001 Python Software Foundation, www.python.org
114 114
115 115
116 116 import __main__
117 117 import builtins as builtin_mod
118 118 import glob
119 119 import time
120 120 import inspect
121 121 import itertools
122 122 import keyword
123 123 import os
124 124 import re
125 125 import sys
126 126 import unicodedata
127 127 import string
128 128 import warnings
129 129
130 130 from contextlib import contextmanager
131 131 from importlib import import_module
132 132 from typing import Iterator, List, Tuple, Iterable, Union
133 133 from types import SimpleNamespace
134 134
135 135 from traitlets.config.configurable import Configurable
136 136 from IPython.core.error import TryNext
137 137 from IPython.core.inputsplitter import ESC_MAGIC
138 138 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
139 from IPython.core.oinspect import InspectColors
139 140 from IPython.utils import generics
140 141 from IPython.utils.dir2 import dir2, get_real_method
141 142 from IPython.utils.process import arg_split
142 143 from traitlets import Bool, Enum, observe, Int
143 144
144 145 try:
145 146 import jedi
146 147 import jedi.api.helpers
147 148 JEDI_INSTALLED = True
148 149 except ImportError:
149 150 JEDI_INSTALLED = False
150 151 #-----------------------------------------------------------------------------
151 152 # Globals
152 153 #-----------------------------------------------------------------------------
153 154
154 155 # Public API
155 156 __all__ = ['Completer','IPCompleter']
156 157
157 158 if sys.platform == 'win32':
158 159 PROTECTABLES = ' '
159 160 else:
160 161 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
161 162
162 163
163 164 _deprecation_readline_sentinel = object()
164 165
165 166
166 167 class ProvisionalCompleterWarning(FutureWarning):
167 168 """
168 169 Exception raised by an experimental feature in this module.
169 170
170 171 Wrap code in :any:`provisionalcompleter` context manager if you
171 172 are certain you want to use an unstable feature.
172 173 """
173 174 pass
174 175
175 176 warnings.filterwarnings('error', category=ProvisionalCompleterWarning)
176 177
177 178 @contextmanager
178 179 def provisionalcompleter(action='ignore'):
179 180 """
180 181
181 182
182 183 This context manager has to be used in any place where unstable completer
183 184 behavior and API may be called.
184 185
185 186 >>> with provisionalcompleter():
186 187 ... completer.do_experimental_things() # works
187 188
188 189 >>> completer.do_experimental_things() # raises.
189 190
190 191 .. note:: Unstable
191 192
192 193 By using this context manager you agree that the API in use may change
193 194 without warning, and that you won't complain if it does so.
194 195
195 196 You also understand that if the API is not to your liking you should report
196 197 a bug to explain your use case upstream and improve the API, and that you
197 198 will lose credibility if you complain after the API is made stable.
198 199
199 200 We'll be happy to get your feedback, feature requests and improvements on
200 201 any of the unstable APIs!
201 202 """
202 203 with warnings.catch_warnings():
203 204 warnings.filterwarnings(action, category=ProvisionalCompleterWarning)
204 205 yield
205 206
206 207
207 208 def has_open_quotes(s):
208 209 """Return whether a string has open quotes.
209 210
210 211 This simply checks whether the number of quote characters of either type in
211 212 the string is odd.
212 213
213 214 Returns
214 215 -------
215 216 If there is an open quote, the quote character is returned. Else, return
216 217 False.
217 218 """
218 219 # We check " first, then ', so complex cases with nested quotes will get
219 220 # the " to take precedence.
220 221 if s.count('"') % 2:
221 222 return '"'
222 223 elif s.count("'") % 2:
223 224 return "'"
224 225 else:
225 226 return False
226 227
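# Illustrative sketch (editorial, not part of the original module): the parity
# check above reports which quote character is still open, or False when
# everything is balanced.
assert has_open_quotes('print("hello') == '"'
assert has_open_quotes("mydict['key") == "'"
assert has_open_quotes('a = "done"') is False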
227 228
228 229 def protect_filename(s, protectables=PROTECTABLES):
229 230 """Escape a string to protect certain characters."""
230 231 if set(s) & set(protectables):
231 232 if sys.platform == "win32":
232 233 return '"' + s + '"'
233 234 else:
234 235 return "".join(("\\" + c if c in protectables else c) for c in s)
235 236 else:
236 237 return s
237 238
238 239
239 240 def expand_user(path):
240 241 """Expand ``~``-style usernames in strings.
241 242
242 243 This is similar to :func:`os.path.expanduser`, but it computes and returns
243 244 extra information that will be useful if the input was being used in
244 245 computing completions, and you wish to return the completions with the
245 246 original '~' instead of its expanded value.
246 247
247 248 Parameters
248 249 ----------
249 250 path : str
250 251 String to be expanded. If no ~ is present, the output is the same as the
251 252 input.
252 253
253 254 Returns
254 255 -------
255 256 newpath : str
256 257 Result of ~ expansion in the input path.
257 258 tilde_expand : bool
258 259 Whether any expansion was performed or not.
259 260 tilde_val : str
260 261 The value that ~ was replaced with.
261 262 """
262 263 # Default values
263 264 tilde_expand = False
264 265 tilde_val = ''
265 266 newpath = path
266 267
267 268 if path.startswith('~'):
268 269 tilde_expand = True
269 270 rest = len(path)-1
270 271 newpath = os.path.expanduser(path)
271 272 if rest:
272 273 tilde_val = newpath[:-rest]
273 274 else:
274 275 tilde_val = newpath
275 276
276 277 return newpath, tilde_expand, tilde_val
277 278
278 279
279 280 def compress_user(path, tilde_expand, tilde_val):
280 281 """Does the opposite of expand_user, with its outputs.
281 282 """
282 283 if tilde_expand:
283 284 return path.replace(tilde_val, '~')
284 285 else:
285 286 return path
286 287
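# Illustrative sketch (editorial): expand_user keeps enough information for
# compress_user to restore the original '~' form. The expanded values in the
# comment assume a POSIX system with HOME=/home/alice.
newpath, expanded, tilde_val = expand_user('~/notebooks')
# e.g. newpath == '/home/alice/notebooks', expanded is True, tilde_val == '/home/alice'
assert compress_user(newpath, expanded, tilde_val) == '~/notebooks'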
287 288
288 289 def completions_sorting_key(word):
289 290 """key for sorting completions
290 291
291 292 This does several things:
292 293
293 294 - Lowercase all completions, so they are sorted alphabetically with
294 295 upper and lower case words mingled
295 296 - Demote any completions starting with underscores to the end
296 297 - Insert any %magic and %%cellmagic completions in the alphabetical order
297 298 by their name
298 299 """
299 300 # Case insensitive sort
300 301 word = word.lower()
301 302
302 303 prio1, prio2 = 0, 0
303 304
304 305 if word.startswith('__'):
305 306 prio1 = 2
306 307 elif word.startswith('_'):
307 308 prio1 = 1
308 309
309 310 if word.endswith('='):
310 311 prio1 = -1
311 312
312 313 if word.startswith('%%'):
313 314 # If there's another % in there, this is something else, so leave it alone
314 315 if not "%" in word[2:]:
315 316 word = word[2:]
316 317 prio2 = 2
317 318 elif word.startswith('%'):
318 319 if not "%" in word[1:]:
319 320 word = word[1:]
320 321 prio2 = 1
321 322
322 323 return prio1, word, prio2
323 324
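# Illustrative sketch (editorial): sorting with this key mingles magics with
# plain names and pushes single- and double-underscore names to the end.
words = ['%%timeit', 'zip', '_private', '%alias', 'abs', '__all__']
assert sorted(words, key=completions_sorting_key) == \
    ['abs', '%alias', '%%timeit', 'zip', '_private', '__all__']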
324 325
325 326 class _FakeJediCompletion:
326 327 """
327 328 This is a workaround to communicate to the UI that Jedi has crashed and to
328 329 report a bug. Will be used only if :any:`IPCompleter.debug` is set to true.
329 330
330 331 Added in IPython 6.0 so should likely be removed for 7.0
331 332
332 333 """
333 334
334 335 def __init__(self, name):
335 336
336 337 self.name = name
337 338 self.complete = name
338 339 self.type = 'crashed'
339 340 self.name_with_symbols = name
340 341
341 342 def __repr__(self):
342 343 return '<Fake completion object jedi has crashed>'
343 344
344 345
345 346 class Completion:
346 347 """
347 348 Completion object used and returned by IPython completers.
348 349
349 350 .. warning:: Unstable
350 351
351 352 This function is unstable, API may change without warning.
352 353 It will also raise unless used in the proper context manager.
353 354
354 355 This acts as a middle-ground :any:`Completion` object between the
355 356 :any:`jedi.api.classes.Completion` object and the Prompt Toolkit completion
356 357 object. While Jedi needs a lot of information about the evaluator and how the
357 358 code should be run/inspected, Prompt Toolkit (and other frontends) mostly
358 359 need user-facing information:
359 360
360 361 - Which range should be replaced by what.
361 362 - Some metadata (like the completion type), or meta information to be
362 363 displayed to the user.
363 364
364 365 For debugging purposes we can also store the origin of the completion (``jedi``,
365 366 ``IPython.python_matches``, ``IPython.magics_matches``...).
366 367 """
367 368
368 369 def __init__(self, start: int, end: int, text: str, *, type: str=None, _origin='') -> None:
369 370 warnings.warn("``Completion`` is a provisional API (as of IPython 6.0). "
370 371 "It may change without warnings. "
371 372 "Use in corresponding context manager.",
372 373 category=ProvisionalCompleterWarning, stacklevel=2)
373 374
374 375 self.start = start
375 376 self.end = end
376 377 self.text = text
377 378 self.type = type
378 379 self._origin = _origin
379 380
380 381 def __repr__(self):
381 382 return '<Completion start=%s end=%s text=%r type=%r>' % (self.start, self.end, self.text, self.type or '?')
382 383
383 384 def __eq__(self, other)->Bool:
384 385 """
385 386 Equality and hash do not hash the type (as some completers may not be
386 387 able to infer the type), but are used to (partially) de-duplicate
387 388 completions.
388 389
389 390 Completely de-duplicating completions is a bit trickier than just
390 391 comparing, as it depends on the surrounding text, which Completions are not
391 392 aware of.
392 393 """
393 394 return self.start == other.start and \
394 395 self.end == other.end and \
395 396 self.text == other.text
396 397
397 398 def __hash__(self):
398 399 return hash((self.start, self.end, self.text))
399 400
400 401
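# Illustrative sketch (editorial): Completion objects must be created inside
# the provisionalcompleter() context manager, otherwise the warning above is
# raised as an error. Equality and hashing ignore the completion type.
with provisionalcompleter():
    c1 = Completion(start=0, end=3, text='foobar', type='function')
    c2 = Completion(start=0, end=3, text='foobar', type=None)
    assert c1 == c2
    assert hash(c1) == hash(c2)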
401 402 _IC = Iterable[Completion]
402 403
403 404
404 405 def _deduplicate_completions(text: str, completions: _IC)-> _IC:
405 406 """
406 407 Deduplicate a set of completions.
407 408
408 409 .. warning:: Unstable
409 410
410 411 This function is unstable, API may change without warning.
411 412
412 413 Parameters
413 414 ----------
414 415 text: str
415 416 text that should be completed.
416 417 completions: Iterator[Completion]
417 418 iterator over the completions to deduplicate
418 419
419 420
420 421 Completions coming from multiple sources may be different but end up having
421 422 the same effect when applied to ``text``. If this is the case, this will
422 423 consider completions as equal and only emit the first encountered.
423 424
424 425 Not folded in `completions()` yet for debugging purposes, and to detect when
425 426 the IPython completer does return things that Jedi does not, but should be
426 427 at some point.
427 428 """
428 429 completions = list(completions)
429 430 if not completions:
430 431 return
431 432
432 433 new_start = min(c.start for c in completions)
433 434 new_end = max(c.end for c in completions)
434 435
435 436 seen = set()
436 437 for c in completions:
437 438 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
438 439 if new_text not in seen:
439 440 yield c
440 441 seen.add(new_text)
441 442
442 443
443 444 def rectify_completions(text: str, completions: _IC, *, _debug=False)->_IC:
444 445 """
445 446 Rectify a set of completions to all have the same ``start`` and ``end``
446 447
447 448 .. warning:: Unstable
448 449
449 450 This function is unstable, API may change without warning.
450 451 It will also raise unless used in the proper context manager.
451 452
452 453 Parameters
453 454 ----------
454 455 text: str
455 456 text that should be completed.
456 457 completions: Iterator[Completion]
457 458 iterator over the completions to rectify
458 459
459 460
460 461 :any:`jedi.api.classes.Completion` s returned by Jedi may not have the same start and end, though
461 462 the Jupyter Protocol requires them to behave like so. This will readjust
462 463 the completions to have the same ``start`` and ``end`` by padding both
463 464 extremities with surrounding text.
464 465
465 466 During stabilisation this should support a ``_debug`` option to log which
466 467 completions are returned by the IPython completer and not found in Jedi, in
467 468 order to make upstream bug reports.
468 469 """
469 470 warnings.warn("`rectify_completions` is a provisional API (as of IPython 6.0). "
470 471 "It may change without warnings. "
471 472 "Use in corresponding context manager.",
472 473 category=ProvisionalCompleterWarning, stacklevel=2)
473 474
474 475 completions = list(completions)
475 476 if not completions:
476 477 return
477 478 starts = (c.start for c in completions)
478 479 ends = (c.end for c in completions)
479 480
480 481 new_start = min(starts)
481 482 new_end = max(ends)
482 483
483 484 seen_jedi = set()
484 485 seen_python_matches = set()
485 486 for c in completions:
486 487 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
487 488 if c._origin == 'jedi':
488 489 seen_jedi.add(new_text)
489 490 elif c._origin == 'IPCompleter.python_matches':
490 491 seen_python_matches.add(new_text)
491 492 yield Completion(new_start, new_end, new_text, type=c.type, _origin=c._origin)
492 493 diff = seen_python_matches.difference(seen_jedi)
493 494 if diff and _debug:
494 495 print('IPython.python matches have extras:', diff)
495 496
496 497
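# Illustrative sketch (editorial): rectify_completions pads every completion so
# they all share one replacement range, as the Jupyter protocol expects.
with provisionalcompleter():
    text = 'pri'
    comps = [Completion(0, 3, 'print'), Completion(1, 3, 'rint')]
    rectified = list(rectify_completions(text, comps))
    assert [(c.start, c.end, c.text) for c in rectified] == \
        [(0, 3, 'print'), (0, 3, 'print')]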
497 498 if sys.platform == 'win32':
498 499 DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
499 500 else:
500 501 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
501 502
502 503 GREEDY_DELIMS = ' =\r\n'
503 504
504 505
505 506 class CompletionSplitter(object):
506 507 """An object to split an input line in a manner similar to readline.
507 508
508 509 By having our own implementation, we can expose readline-like completion in
509 510 a uniform manner to all frontends. This object only needs to be given the
510 511 line of text to be split and the cursor position on said line, and it
511 512 returns the 'word' to be completed on at the cursor after splitting the
512 513 entire line.
513 514
514 515 What characters are used as splitting delimiters can be controlled by
515 516 setting the ``delims`` attribute (this is a property that internally
516 517 automatically builds the necessary regular expression)"""
517 518
518 519 # Private interface
519 520
520 521 # A string of delimiter characters. The default value makes sense for
521 522 # IPython's most typical usage patterns.
522 523 _delims = DELIMS
523 524
524 525 # The expression (a normal string) to be compiled into a regular expression
525 526 # for actual splitting. We store it as an attribute mostly for ease of
526 527 # debugging, since this type of code can be so tricky to debug.
527 528 _delim_expr = None
528 529
529 530 # The regular expression that does the actual splitting
530 531 _delim_re = None
531 532
532 533 def __init__(self, delims=None):
533 534 delims = CompletionSplitter._delims if delims is None else delims
534 535 self.delims = delims
535 536
536 537 @property
537 538 def delims(self):
538 539 """Return the string of delimiter characters."""
539 540 return self._delims
540 541
541 542 @delims.setter
542 543 def delims(self, delims):
543 544 """Set the delimiters for line splitting."""
544 545 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
545 546 self._delim_re = re.compile(expr)
546 547 self._delims = delims
547 548 self._delim_expr = expr
548 549
549 550 def split_line(self, line, cursor_pos=None):
550 551 """Split a line of text with a cursor at the given position.
551 552 """
552 553 l = line if cursor_pos is None else line[:cursor_pos]
553 554 return self._delim_re.split(l)[-1]
554 555
555 556
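# Illustrative sketch (editorial): the splitter keeps only the last "word"
# before the cursor, using the readline-like delimiters defined above. Note
# that '.' is not a delimiter, so dotted names survive intact.
sp = CompletionSplitter()
assert sp.split_line('run foo.bar(baz, qu') == 'qu'
assert sp.split_line('x = some_dict["ke') == 'ke'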
556 557
557 558 class Completer(Configurable):
558 559
559 560 greedy = Bool(False,
560 561 help="""Activate greedy completion
561 562 PENDING DEPRECATION. This is now mostly taken care of with Jedi.
562 563
563 564 This will enable completion on elements of lists, results of function calls, etc.,
564 565 but can be unsafe because the code is actually evaluated on TAB.
565 566 """
566 567 ).tag(config=True)
567 568
568 569 use_jedi = Bool(default_value=JEDI_INSTALLED,
569 570 help="Experimental: Use Jedi to generate autocompletions. "
570 571 "Default to True if jedi is installed").tag(config=True)
571 572
572 573 jedi_compute_type_timeout = Int(default_value=400,
573 574 help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
574 575 Set to 0 to stop computing types. A non-zero value lower than 100ms may hurt
575 576 performance by preventing jedi from building its cache.
576 577 """).tag(config=True)
577 578
578 579 debug = Bool(default_value=False,
579 580 help='Enable debug for the Completer. Mostly print extra '
580 581 'information for experimental jedi integration.')\
581 582 .tag(config=True)
582 583
583 584 backslash_combining_completions = Bool(True,
584 585 help="Enable unicode completions, e.g. \\alpha<tab> . "
585 586 "Includes completion of latex commands, unicode names, and expanding "
586 587 "unicode characters back to latex commands.").tag(config=True)
587 588
588 589
589 590
590 591 def __init__(self, namespace=None, global_namespace=None, **kwargs):
591 592 """Create a new completer for the command line.
592 593
593 594 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
594 595
595 596 If unspecified, the default namespace where completions are performed
596 597 is __main__ (technically, __main__.__dict__). Namespaces should be
597 598 given as dictionaries.
598 599
599 600 An optional second namespace can be given. This allows the completer
600 601 to handle cases where both the local and global scopes need to be
601 602 distinguished.
602 603 """
603 604
604 605 # Don't bind to namespace quite yet, but flag whether the user wants a
605 606 # specific namespace or to use __main__.__dict__. This will allow us
606 607 # to bind to __main__.__dict__ at completion time, not now.
607 608 if namespace is None:
608 609 self.use_main_ns = True
609 610 else:
610 611 self.use_main_ns = False
611 612 self.namespace = namespace
612 613
613 614 # The global namespace, if given, can be bound directly
614 615 if global_namespace is None:
615 616 self.global_namespace = {}
616 617 else:
617 618 self.global_namespace = global_namespace
618 619
619 620 super(Completer, self).__init__(**kwargs)
620 621
621 622 def complete(self, text, state):
622 623 """Return the next possible completion for 'text'.
623 624
624 625 This is called successively with state == 0, 1, 2, ... until it
625 626 returns None. The completion should begin with 'text'.
626 627
627 628 """
628 629 if self.use_main_ns:
629 630 self.namespace = __main__.__dict__
630 631
631 632 if state == 0:
632 633 if "." in text:
633 634 self.matches = self.attr_matches(text)
634 635 else:
635 636 self.matches = self.global_matches(text)
636 637 try:
637 638 return self.matches[state]
638 639 except IndexError:
639 640 return None
640 641
641 642 def global_matches(self, text):
642 643 """Compute matches when text is a simple name.
643 644
644 645 Return a list of all keywords, built-in functions and names currently
645 646 defined in self.namespace or self.global_namespace that match.
646 647
647 648 """
648 649 matches = []
649 650 match_append = matches.append
650 651 n = len(text)
651 652 for lst in [keyword.kwlist,
652 653 builtin_mod.__dict__.keys(),
653 654 self.namespace.keys(),
654 655 self.global_namespace.keys()]:
655 656 for word in lst:
656 657 if word[:n] == text and word != "__builtins__":
657 658 match_append(word)
658 659
659 660 snake_case_re = re.compile(r"[^_]+(_[^_]+)+?\Z")
660 661 for lst in [self.namespace.keys(),
661 662 self.global_namespace.keys()]:
662 663 shortened = {"_".join([sub[0] for sub in word.split('_')]) : word
663 664 for word in lst if snake_case_re.match(word)}
664 665 for word in shortened.keys():
665 666 if word[:n] == text and word != "__builtins__":
666 667 match_append(shortened[word])
667 668 return matches
668 669
669 670 def attr_matches(self, text):
670 671 """Compute matches when text contains a dot.
671 672
672 673 Assuming the text is of the form NAME.NAME....[NAME], and is
673 674 evaluatable in self.namespace or self.global_namespace, it will be
674 675 evaluated and its attributes (as revealed by dir()) are used as
675 676 possible completions. (For class instances, class members are
676 677 also considered.)
677 678
678 679 WARNING: this can still invoke arbitrary C code, if an object
679 680 with a __getattr__ hook is evaluated.
680 681
681 682 """
682 683
683 684 # Another option, seems to work great. Catches things like ''.<tab>
684 685 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
685 686
686 687 if m:
687 688 expr, attr = m.group(1, 3)
688 689 elif self.greedy:
689 690 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
690 691 if not m2:
691 692 return []
692 693 expr, attr = m2.group(1,2)
693 694 else:
694 695 return []
695 696
696 697 try:
697 698 obj = eval(expr, self.namespace)
698 699 except:
699 700 try:
700 701 obj = eval(expr, self.global_namespace)
701 702 except:
702 703 return []
703 704
704 705 if self.limit_to__all__ and hasattr(obj, '__all__'):
705 706 words = get__all__entries(obj)
706 707 else:
707 708 words = dir2(obj)
708 709
709 710 try:
710 711 words = generics.complete_object(obj, words)
711 712 except TryNext:
712 713 pass
713 714 except AssertionError:
714 715 raise
715 716 except Exception:
716 717 # Silence errors from completion function
717 718 #raise # dbg
718 719 pass
719 720 # Build match list to return
720 721 n = len(attr)
721 722 return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr ]
722 723
723 724
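# Illustrative sketch (editorial): the base Completer can be driven
# readline-style by calling complete() with increasing state until it
# returns None.
ns = {'alpha': 1, 'alphabet': 'abc'}
comp = Completer(namespace=ns)
found, state = set(), 0
while True:
    m = comp.complete('alph', state)
    if m is None:
        break
    found.add(m)
    state += 1
assert found == {'alpha', 'alphabet'}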
724 725 def get__all__entries(obj):
725 726 """returns the strings in the __all__ attribute"""
726 727 try:
727 728 words = getattr(obj, '__all__')
728 729 except:
729 730 return []
730 731
731 732 return [w for w in words if isinstance(w, str)]
732 733
733 734
734 735 def match_dict_keys(keys: List[str], prefix: str, delims: str):
735 736 """Used by dict_key_matches, matching the prefix to a list of keys
736 737
737 738 Parameters
738 739 ==========
739 740 keys:
740 741 list of keys in dictionary currently being completed.
741 742 prefix:
742 743 Part of the text already typed by the user. e.g. `mydict[b'fo`
743 744 delims:
744 745 String of delimiters to consider when finding the current key.
745 746
746 747 Returns
747 748 =======
748 749
749 750 A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
750 751 ``quote`` being the quote that needs to be used to close the current string,
751 752 ``token_start`` the position where the replacement should start occurring, and
752 753 ``matched`` a list of replacement/completion strings.
753 754
754 755 """
755 756 if not prefix:
756 757 return None, 0, [repr(k) for k in keys
757 758 if isinstance(k, (str, bytes))]
758 759 quote_match = re.search('["\']', prefix)
759 760 quote = quote_match.group()
760 761 try:
761 762 prefix_str = eval(prefix + quote, {})
762 763 except Exception:
763 764 return None, 0, []
764 765
765 766 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
766 767 token_match = re.search(pattern, prefix, re.UNICODE)
767 768 token_start = token_match.start()
768 769 token_prefix = token_match.group()
769 770
770 771 matched = []
771 772 for key in keys:
772 773 try:
773 774 if not key.startswith(prefix_str):
774 775 continue
775 776 except (AttributeError, TypeError, UnicodeError):
776 777 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
777 778 continue
778 779
779 780 # reformat remainder of key to begin with prefix
780 781 rem = key[len(prefix_str):]
781 782 # force repr wrapped in '
782 783 rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
783 784 if rem_repr.startswith('u') and prefix[0] not in 'uU':
784 785 # Found key is unicode, but prefix is Py2 string.
785 786 # Therefore attempt to interpret key as string.
786 787 try:
787 788 rem_repr = repr(rem.encode('ascii') + '"')
788 789 except UnicodeEncodeError:
789 790 continue
790 791
791 792 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
792 793 if quote == '"':
793 794 # The entered prefix is quoted with ",
794 795 # but the match is quoted with '.
795 796 # A contained " hence needs escaping for comparison:
796 797 rem_repr = rem_repr.replace('"', '\\"')
797 798
798 799 # then reinsert prefix from start of token
799 800 matched.append('%s%s' % (token_prefix, rem_repr))
800 801 return quote, token_start, matched
801 802
802 803
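# Illustrative sketch (editorial): completing a partially typed string key.
# DELIMS is the same delimiter string that IPCompleter's splitter uses by
# default; the quote itself is a delimiter, so matches start right after it.
keys = ['foo', 'food', 'bar']
quote, token_start, matched = match_dict_keys(keys, "'fo", DELIMS)
assert quote == "'"
assert token_start == 1
assert matched == ['foo', 'food']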
803 804 def cursor_to_position(text:str, line:int, column:int)->int:
804 805 """
805 806
806 807 Convert the (line,column) position of the cursor in text to an offset in a
807 808 string.
808 809
809 810 Parameters
810 811 ----------
811 812
812 813 text : str
813 814 The text in which to calculate the cursor offset
814 815 line : int
815 816 Line of the cursor; 0-indexed
816 817 column : int
817 818 Column of the cursor 0-indexed
818 819
819 820 Return
820 821 ------
821 822 Position of the cursor in ``text``, 0-indexed.
822 823
823 824 See Also
824 825 --------
825 826 position_to_cursor: reciprocal of this function
826 827
827 828 """
828 829 lines = text.split('\n')
829 830 assert line <= len(lines), '{} <= {}'.format(str(line), str(len(lines)))
830 831
831 832 return sum(len(l) + 1 for l in lines[:line]) + column
832 833
833 834 def position_to_cursor(text:str, offset:int)->Tuple[int, int]:
834 835 """
835 836 Convert the position of the cursor in text (0 indexed) to a line
836 837 number(0-indexed) and a column number (0-indexed) pair
837 838
838 839 Position should be a valid position in ``text``.
839 840
840 841 Parameters
841 842 ----------
842 843
843 844 text : str
844 845 The text in which to calculate the cursor offset
845 846 offset : int
846 847 Position of the cursor in ``text``, 0-indexed.
847 848
848 849 Return
849 850 ------
850 851 (line, column) : (int, int)
851 852 Line of the cursor; 0-indexed, column of the cursor 0-indexed
852 853
853 854
854 855 See Also
855 856 --------
856 857 cursor_to_position : reciprocal of this function
857 858
858 859
859 860 """
860 861
861 862 assert 0 < offset <= len(text) , "0 < %s <= %s" % (offset , len(text))
862 863
863 864 before = text[:offset]
864 865 blines = before.split('\n') # ! splitlines trims trailing \n
865 866 line = before.count('\n')
866 867 col = len(blines[-1])
867 868 return line, col
868 869
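# Illustrative sketch (editorial): the two helpers above are reciprocal
# conversions between a flat offset and a (line, column) pair.
src = 'abc\ndef'
offset = cursor_to_position(src, 1, 2)   # line 1, column 2 -> the 'f'
assert offset == 6 and src[offset] == 'f'
assert position_to_cursor(src, offset) == (1, 2)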
869 870
870 871 def _safe_isinstance(obj, module, class_name):
871 872 """Checks if obj is an instance of module.class_name if loaded
872 873 """
873 874 return (module in sys.modules and
874 875 isinstance(obj, getattr(import_module(module), class_name)))
875 876
876 877
877 878 def back_unicode_name_matches(text):
878 879 u"""Match unicode characters back to unicode name
879 880
880 881 This does ``β˜ƒ`` -> ``\\snowman``
881 882
882 883 Note that snowman is not a valid python3 combining character but will be expanded.
883 884 Though it will not recombine back to the snowman character by the completion machinery.
884 885
885 886 This will also not back-complete standard sequences like \\n, \\b ...
886 887
887 888 Used on Python 3 only.
888 889 """
889 890 if len(text)<2:
890 891 return u'', ()
891 892 maybe_slash = text[-2]
892 893 if maybe_slash != '\\':
893 894 return u'', ()
894 895
895 896 char = text[-1]
896 897 # no expand on quote for completion in strings.
897 898 # nor backcomplete standard ascii keys
898 899 if char in string.ascii_letters or char in ['"',"'"]:
899 900 return u'', ()
900 901 try :
901 902 unic = unicodedata.name(char)
902 903 return '\\'+char,['\\'+unic]
903 904 except KeyError:
904 905 pass
905 906 return u'', ()
906 907
907 908 def back_latex_name_matches(text:str):
908 909 """Match latex characters back to unicode name
909 910
910 911 This does ``\\β„΅`` -> ``\\aleph``
911 912
912 913 Used on Python 3 only.
913 914 """
914 915 if len(text)<2:
915 916 return u'', ()
916 917 maybe_slash = text[-2]
917 918 if maybe_slash != '\\':
918 919 return u'', ()
919 920
920 921
921 922 char = text[-1]
922 923 # no expand on quote for completion in strings.
923 924 # nor backcomplete standard ascii keys
924 925 if char in string.ascii_letters or char in ['"',"'"]:
925 926 return u'', ()
926 927 try :
927 928 latex = reverse_latex_symbol[char]
928 929 # '\\' replace the \ as well
929 930 return '\\'+char,[latex]
930 931 except KeyError:
931 932 pass
932 933 return u'', ()
933 934
934 935
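# Illustrative sketch (editorial): back-completion maps an already typed
# character preceded by a backslash to its unicode name or latex command.
assert back_unicode_name_matches('\u2603') == ('', ())            # no backslash, no match
assert back_unicode_name_matches('\\\u2603') == ('\\\u2603', ['\\SNOWMAN'])
back_latex_name_matches('\\\u03b1')   # -> ('\\Ξ±', ['\\alpha']), as in the module docstring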
935 936 class IPCompleter(Completer):
936 937 """Extension of the completer class with IPython-specific features"""
937 938
938 939 @observe('greedy')
939 940 def _greedy_changed(self, change):
940 941 """update the splitter and readline delims when greedy is changed"""
941 942 if change['new']:
942 943 self.splitter.delims = GREEDY_DELIMS
943 944 else:
944 945 self.splitter.delims = DELIMS
945 946
946 947 merge_completions = Bool(True,
947 948 help="""Whether to merge completion results into a single list
948 949
949 950 If False, only the completion results from the first non-empty
950 951 completer will be returned.
951 952 """
952 953 ).tag(config=True)
953 954 omit__names = Enum((0,1,2), default_value=2,
954 955 help="""Instruct the completer to omit private method names
955 956
956 957 Specifically, when completing on ``object.<tab>``.
957 958
958 959 When 2 [default]: all names that start with '_' will be excluded.
959 960
960 961 When 1: all 'magic' names (``__foo__``) will be excluded.
961 962
962 963 When 0: nothing will be excluded.
963 964 """
964 965 ).tag(config=True)
965 966 limit_to__all__ = Bool(False,
966 967 help="""
967 968 DEPRECATED as of version 5.0.
968 969
969 970 Instruct the completer to use __all__ for the completion
970 971
971 972 Specifically, when completing on ``object.<tab>``.
972 973
973 974 When True: only those names in obj.__all__ will be included.
974 975
975 976 When False [default]: the __all__ attribute is ignored
976 977 """,
977 978 ).tag(config=True)
978 979
979 980 @observe('limit_to__all__')
980 981 def _limit_to_all_changed(self, change):
981 982 warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
982 983 'value has been deprecated since IPython 5.0, will be made to have '
983 984 'no effect and then removed in a future version of IPython.',
984 985 UserWarning)
985 986
986 987 def __init__(self, shell=None, namespace=None, global_namespace=None,
987 988 use_readline=_deprecation_readline_sentinel, config=None, **kwargs):
988 989 """IPCompleter() -> completer
989 990
990 991 Return a completer object.
991 992
992 993 Parameters
993 994 ----------
994 995
995 996 shell
996 997 a pointer to the ipython shell itself. This is needed
997 998 because this completer knows about magic functions, and those can
998 999 only be accessed via the ipython instance.
999 1000
1000 1001 namespace : dict, optional
1001 1002 an optional dict where completions are performed.
1002 1003
1003 1004 global_namespace : dict, optional
1004 1005 secondary optional dict for completions, to
1005 1006 handle cases (such as IPython embedded inside functions) where
1006 1007 both Python scopes are visible.
1007 1008
1008 1009 use_readline : bool, optional
1009 1010 DEPRECATED, ignored since IPython 6.0, will have no effect
1010 1011 """
1011 1012
1012 1013 self.magic_escape = ESC_MAGIC
1013 1014 self.splitter = CompletionSplitter()
1014 1015
1015 1016 if use_readline is not _deprecation_readline_sentinel:
1016 1017 warnings.warn('The `use_readline` parameter is deprecated and ignored since IPython 6.0.',
1017 1018 DeprecationWarning, stacklevel=2)
1018 1019
1019 1020 # _greedy_changed() depends on splitter and readline being defined:
1020 1021 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
1021 1022 config=config, **kwargs)
1022 1023
1023 1024 # List where completion matches will be stored
1024 1025 self.matches = []
1025 1026 self.shell = shell
1026 1027 # Regexp to split filenames with spaces in them
1027 1028 self.space_name_re = re.compile(r'([^\\] )')
1028 1029 # Hold a local ref. to glob.glob for speed
1029 1030 self.glob = glob.glob
1030 1031
1031 1032 # Determine if we are running on 'dumb' terminals, like (X)Emacs
1032 1033 # buffers, to avoid completion problems.
1033 1034 term = os.environ.get('TERM','xterm')
1034 1035 self.dumb_terminal = term in ['dumb','emacs']
1035 1036
1036 1037 # Special handling of backslashes needed in win32 platforms
1037 1038 if sys.platform == "win32":
1038 1039 self.clean_glob = self._clean_glob_win32
1039 1040 else:
1040 1041 self.clean_glob = self._clean_glob
1041 1042
1042 1043 #regexp to parse docstring for function signature
1043 1044 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1044 1045 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1045 1046 #use this if positional argument name is also needed
1046 1047 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
1047 1048
1048 1049 # All active matcher routines for completion
1049 1050 self.matchers = [
1050 1051 self.python_matches,
1051 1052 self.file_matches,
1052 self.magic_config_matches,
1053 1053 self.magic_matches,
1054 1054 self.python_func_kw_matches,
1055 1055 self.dict_key_matches,
1056 1056 ]
1057 self.magic_arg_matchers = [
1058 self.magic_config_matches,
1059 self.magic_color_matches,
1060 ]
1057 1061
1058 1062 # This is set externally by InteractiveShell
1059 1063 self.custom_completers = None
1060 1064
1061 1065 def all_completions(self, text):
1062 1066 """
1063 1067 Wrapper around the complete method for the benefit of emacs.
1064 1068 """
1065 1069 return self.complete(text)[1]
1066 1070
1067 1071 def _clean_glob(self, text):
1068 1072 return self.glob("%s*" % text)
1069 1073
1070 1074 def _clean_glob_win32(self,text):
1071 1075 return [f.replace("\\","/")
1072 1076 for f in self.glob("%s*" % text)]
1073 1077
1074 1078 def file_matches(self, text):
1075 1079 """Match filenames, expanding ~USER type strings.
1076 1080
1077 1081 Most of the seemingly convoluted logic in this completer is an
1078 1082 attempt to handle filenames with spaces in them. And yet it's not
1079 1083 quite perfect, because Python's readline doesn't expose all of the
1080 1084 GNU readline details needed for this to be done correctly.
1081 1085
1082 1086 For a filename with a space in it, the printed completions will be
1083 1087 only the parts after what's already been typed (instead of the
1084 1088 full completions, as is normally done). I don't think with the
1085 1089 current (as of Python 2.3) Python readline it's possible to do
1086 1090 better."""
1087 1091
1088 1092 # chars that require escaping with backslash - i.e. chars
1089 1093 # that readline treats incorrectly as delimiters, but we
1090 1094 # don't want to treat as delimiters in filename matching
1091 1095 # when escaped with backslash
1092 1096 if text.startswith('!'):
1093 1097 text = text[1:]
1094 1098 text_prefix = u'!'
1095 1099 else:
1096 1100 text_prefix = u''
1097 1101
1098 1102 text_until_cursor = self.text_until_cursor
1099 1103 # track strings with open quotes
1100 1104 open_quotes = has_open_quotes(text_until_cursor)
1101 1105
1102 1106 if '(' in text_until_cursor or '[' in text_until_cursor:
1103 1107 lsplit = text
1104 1108 else:
1105 1109 try:
1106 1110 # arg_split ~ shlex.split, but with unicode bugs fixed by us
1107 1111 lsplit = arg_split(text_until_cursor)[-1]
1108 1112 except ValueError:
1109 1113 # typically an unmatched ", or backslash without escaped char.
1110 1114 if open_quotes:
1111 1115 lsplit = text_until_cursor.split(open_quotes)[-1]
1112 1116 else:
1113 1117 return []
1114 1118 except IndexError:
1115 1119 # tab pressed on empty line
1116 1120 lsplit = ""
1117 1121
1118 1122 if not open_quotes and lsplit != protect_filename(lsplit):
1119 1123 # if protectables are found, do matching on the whole escaped name
1120 1124 has_protectables = True
1121 1125 text0,text = text,lsplit
1122 1126 else:
1123 1127 has_protectables = False
1124 1128 text = os.path.expanduser(text)
1125 1129
1126 1130 if text == "":
1127 1131 return [text_prefix + protect_filename(f) for f in self.glob("*")]
1128 1132
1129 1133 # Compute the matches from the filesystem
1130 1134 if sys.platform == 'win32':
1131 1135 m0 = self.clean_glob(text)
1132 1136 else:
1133 1137 m0 = self.clean_glob(text.replace('\\', ''))
1134 1138
1135 1139 if has_protectables:
1136 1140 # If we had protectables, we need to revert our changes to the
1137 1141 # beginning of filename so that we don't double-write the part
1138 1142 # of the filename we have so far
1139 1143 len_lsplit = len(lsplit)
1140 1144 matches = [text_prefix + text0 +
1141 1145 protect_filename(f[len_lsplit:]) for f in m0]
1142 1146 else:
1143 1147 if open_quotes:
1144 1148 # if we have a string with an open quote, we don't need to
1145 1149 # protect the names beyond the quote (and we _shouldn't_, as
1146 1150 # it would cause bugs when the filesystem call is made).
1147 1151 matches = m0 if sys.platform == "win32" else\
1148 1152 [protect_filename(f, open_quotes) for f in m0]
1149 1153 else:
1150 1154 matches = [text_prefix +
1151 1155 protect_filename(f) for f in m0]
1152 1156
1153 1157 # Mark directories in input list by appending '/' to their names.
1154 1158 return [x+'/' if os.path.isdir(x) else x for x in matches]
1155 1159
1156 1160 def magic_matches(self, text):
1157 1161 """Match magics"""
1158 1162 # Get all shell magics now rather than statically, so magics loaded at
1159 1163 # runtime show up too.
1160 1164 lsm = self.shell.magics_manager.lsmagic()
1161 1165 line_magics = lsm['line']
1162 1166 cell_magics = lsm['cell']
1163 1167 pre = self.magic_escape
1164 1168 pre2 = pre+pre
1165 1169
1166 1170 # Completion logic:
1167 1171 # - user gives %%: only do cell magics
1168 1172 # - user gives %: do both line and cell magics
1169 1173 # - no prefix: do both
1170 1174 # In other words, line magics are skipped if the user gives %% explicitly
1171 1175 #
1172 1176 # We also exclude magics that match any currently visible names:
1173 1177 # https://github.com/ipython/ipython/issues/4877
1174 1178 bare_text = text.lstrip(pre)
1175 1179 global_matches = self.global_matches(bare_text)
1176 1180 matches = lambda magic: magic.startswith(bare_text) \
1177 1181 and magic not in global_matches
1178 1182 comp = [ pre2+m for m in cell_magics if matches(m)]
1179 1183 if not text.startswith(pre2):
1180 1184 comp += [ pre+m for m in line_magics if matches(m)]
1181 1185
1182 1186 return comp
1183 1187
1184 def magic_config_matches(self, text):
1188 def magic_config_matches(self, line_buffer):
1185 1189 """ Match class names and attributes for %config magic """
1186 # use line buffer instead of text (which is a word)
1187 texts = self.line_buffer.strip().split()
1190 texts = line_buffer.strip().split()
1188 1191
1189 if len(texts) > 0 and \
1190 ('config'.startswith(texts[0]) or '%config'.startswith(texts[0])):
1192 if len(texts) > 0 and (texts[0] == 'config' or texts[0] == '%config'):
1191 1193 # get all configuration classes
1192 1194 classes = sorted(set([ c for c in self.shell.configurables
1193 1195 if c.__class__.class_traits(config=True)
1194 1196 ]), key=lambda x: x.__class__.__name__)
1195 1197 classnames = [ c.__class__.__name__ for c in classes ]
1196 1198
1197 1199 # return all classnames if config or %config is given
1198 1200 if len(texts) == 1:
1199 1201 return classnames
1200 1202
1201 1203 # match classname
1202 1204 classname_texts = texts[1].split('.')
1203 1205 classname = classname_texts[0]
1204 1206 classname_matches = [ c for c in classnames
1205 1207 if c.startswith(classname) ]
1206 1208
1207 1209 # return matched classes or the matched class with attributes
1208 1210 if texts[1].find('.') < 0:
1209 1211 return classname_matches
1210 1212 elif len(classname_matches) == 1 and \
1211 1213 classname_matches[0] == classname:
1212 1214 cls = classes[classnames.index(classname)].__class__
1213 1215 help = cls.class_get_help()
1214 1216 # strip leading '--' from cl-args:
1215 1217 help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
1216 1218 return [ attr.split('=')[0]
1217 1219 for attr in help.strip().splitlines()
1218 1220 if attr.startswith(texts[1]) ]
1219 1221 return []
1220 1222
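# Illustrative sketch (editorial): how the %config matcher above behaves.
# ``completer`` is assumed to be a live IPCompleter with a shell attached,
# e.g. ``get_ipython().Completer`` in an interactive session.
def _demo_config_completion(completer):
    # the first word must be exactly 'config' or '%config'
    print(completer.magic_config_matches('%config '))             # all configurable class names
    print(completer.magic_config_matches('%config IPComp'))       # e.g. ['IPCompleter']
    print(completer.magic_config_matches('%config IPCompleter.')) # that class's config traits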
1223 def magic_color_matches(self, line_buffer):
1224 """ Match color schemes for %colors magic"""
1225 texts = line_buffer.strip().split()
1226
1227 if len(texts) > 0 and (texts[0] == 'colors' or texts[0] == '%colors'):
1228 prefix = texts[1] if len(texts) > 1 else ''
1229 return [ color for color in InspectColors.keys()
1230 if color.startswith(prefix) ]
1231 return []
1232
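# Illustrative sketch (editorial): the new %colors matcher only fires when the
# first word is 'colors' or '%colors', then filters the scheme names taken from
# InspectColors by the (case sensitive) typed prefix. ``completer`` is assumed
# to be a live IPCompleter, e.g. ``get_ipython().Completer``.
def _demo_color_completion(completer):
    print(completer.magic_color_matches('%colors '))    # all schemes, e.g. ['NoColor', 'Linux', 'LightBG', 'Neutral']
    print(completer.magic_color_matches('%colors Li'))  # e.g. ['Linux', 'LightBG']
    print(completer.magic_color_matches('%colors li'))  # [] -- prefix match is case sensitive
    print(completer.magic_color_matches('%cd '))        # [] -- not the %colors magic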
1221 1233 def _jedi_matches(self, cursor_column:int, cursor_line:int, text:str):
1222 1234 """
1223 1235
1224 1236 Return a list of :any:`jedi.api.Completions` object from a ``text`` and
1225 1237 cursor position.
1226 1238
1227 1239 Parameters
1228 1240 ----------
1229 1241 cursor_column : int
1230 1242 column position of the cursor in ``text``, 0-indexed.
1231 1243 cursor_line : int
1232 1244 line position of the cursor in ``text``, 0-indexed
1233 1245 text : str
1234 1246 text to complete
1235 1247
1236 1248 Debugging
1237 1249 ---------
1238 1250
1239 1251 If ``IPCompleter.debug`` is ``True``, this may return a :any:`_FakeJediCompletion`
1240 1252 object containing a string with the Jedi debug information attached.
1241 1253 """
1242 1254 namespaces = [self.namespace]
1243 1255 if self.global_namespace is not None:
1244 1256 namespaces.append(self.global_namespace)
1245 1257
1246 1258 completion_filter = lambda x:x
1247 1259 # cursor_pos is an int, jedi wants line and column
1248 1260 offset = cursor_to_position(text, cursor_line, cursor_column)
1249 1261 # filter output if we are completing for object members
1250 1262 if offset:
1251 1263 pre = text[offset-1]
1252 1264 if pre == '.':
1253 1265 if self.omit__names == 2:
1254 1266 completion_filter = lambda c:not c.name.startswith('_')
1255 1267 elif self.omit__names == 1:
1256 1268 completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
1257 1269 elif self.omit__names == 0:
1258 1270 completion_filter = lambda x:x
1259 1271 else:
1260 1272 raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
1261 1273
1262 1274 interpreter = jedi.Interpreter(
1263 1275 text, namespaces, column=cursor_column, line=cursor_line + 1)
1264 1276
1265 1277 try_jedi = False
1266 1278
1267 1279 try:
1268 1280 # should we check the type of the node is Error ?
1269 1281 from jedi.parser.tree import ErrorLeaf
1270 1282 next_to_last_tree = interpreter._get_module().tree_node.children[-2]
1271 1283 completing_string = False
1272 1284 if isinstance(next_to_last_tree, ErrorLeaf):
1273 1285 completing_string = interpreter._get_module().tree_node.children[-2].value[0] in {'"', "'"}
1274 1286 # if we are in a string jedi is likely not the right candidate for
1275 1287 # now. Skip it.
1276 1288 try_jedi = not completing_string
1277 1289 except Exception as e:
1278 1290 # many things can go wrong; we are using a private API, just don't crash.
1279 1291 if self.debug:
1280 1292 print("Error detecting if completing a non-finished string :", e, '|')
1281 1293
1282 1294 if not try_jedi:
1283 1295 return []
1284 1296 try:
1285 1297 return filter(completion_filter, interpreter.completions())
1286 1298 except Exception as e:
1287 1299 if self.debug:
1288 1300 return [_FakeJediCompletion('Oops Jedi has crashed, please report a bug with the following:\n"""\n%s\n"""' % (e))]
1289 1301 else:
1290 1302 return []
1291 1303
1292 1304 def python_matches(self, text):
1293 1305 """Match attributes or global python names"""
1294 1306 if "." in text:
1295 1307 try:
1296 1308 matches = self.attr_matches(text)
1297 1309 if text.endswith('.') and self.omit__names:
1298 1310 if self.omit__names == 1:
1299 1311 # true if txt is _not_ a __ name, false otherwise:
1300 1312 no__name = (lambda txt:
1301 1313 re.match(r'.*\.__.*?__',txt) is None)
1302 1314 else:
1303 1315 # true if txt is _not_ a _ name, false otherwise:
1304 1316 no__name = (lambda txt:
1305 1317 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
1306 1318 matches = filter(no__name, matches)
1307 1319 except NameError:
1308 1320 # catches <undefined attributes>.<tab>
1309 1321 matches = []
1310 1322 else:
1311 1323 matches = self.global_matches(text)
1312 1324 return matches
1313 1325
1314 1326 def _default_arguments_from_docstring(self, doc):
1315 1327 """Parse the first line of docstring for call signature.
1316 1328
1317 1329 Docstring should be of the form 'min(iterable[, key=func])\n'.
1318 1330 It can also parse cython docstring of the form
1319 1331 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
1320 1332 """
1321 1333 if doc is None:
1322 1334 return []
1323 1335
1324 1336 # only care about the first line
1325 1337 line = doc.lstrip().splitlines()[0]
1326 1338
1327 1339 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1328 1340 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
1329 1341 sig = self.docstring_sig_re.search(line)
1330 1342 if sig is None:
1331 1343 return []
1332 1344 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
1333 1345 sig = sig.groups()[0].split(',')
1334 1346 ret = []
1335 1347 for s in sig:
1336 1348 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1337 1349 ret += self.docstring_kwd_re.findall(s)
1338 1350 return ret
1339 1351
1340 1352 def _default_arguments(self, obj):
1341 1353 """Return the list of default arguments of obj if it is callable,
1342 1354 or empty list otherwise."""
1343 1355 call_obj = obj
1344 1356 ret = []
1345 1357 if inspect.isbuiltin(obj):
1346 1358 pass
1347 1359 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
1348 1360 if inspect.isclass(obj):
1349 1361 #for cython embedsignature=True the constructor docstring
1350 1362 #belongs to the object itself not __init__
1351 1363 ret += self._default_arguments_from_docstring(
1352 1364 getattr(obj, '__doc__', ''))
1353 1365 # for classes, check for __init__,__new__
1354 1366 call_obj = (getattr(obj, '__init__', None) or
1355 1367 getattr(obj, '__new__', None))
1356 1368 # for all others, check if they are __call__able
1357 1369 elif hasattr(obj, '__call__'):
1358 1370 call_obj = obj.__call__
1359 1371 ret += self._default_arguments_from_docstring(
1360 1372 getattr(call_obj, '__doc__', ''))
1361 1373
1362 1374 _keeps = (inspect.Parameter.KEYWORD_ONLY,
1363 1375 inspect.Parameter.POSITIONAL_OR_KEYWORD)
1364 1376
1365 1377 try:
1366 1378 sig = inspect.signature(call_obj)
1367 1379 ret.extend(k for k, v in sig.parameters.items() if
1368 1380 v.kind in _keeps)
1369 1381 except ValueError:
1370 1382 pass
1371 1383
1372 1384 return list(set(ret))
1373 1385
1374 1386 def python_func_kw_matches(self,text):
1375 1387 """Match named parameters (kwargs) of the last open function"""
1376 1388
1377 1389 if "." in text: # a parameter cannot be dotted
1378 1390 return []
1379 1391 try: regexp = self.__funcParamsRegex
1380 1392 except AttributeError:
1381 1393 regexp = self.__funcParamsRegex = re.compile(r'''
1382 1394 '.*?(?<!\\)' | # single quoted strings or
1383 1395 ".*?(?<!\\)" | # double quoted strings or
1384 1396 \w+ | # identifier
1385 1397 \S # other characters
1386 1398 ''', re.VERBOSE | re.DOTALL)
1387 1399 # 1. find the nearest identifier that comes before an unclosed
1388 1400 # parenthesis before the cursor
1389 1401 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
1390 1402 tokens = regexp.findall(self.text_until_cursor)
1391 1403 iterTokens = reversed(tokens); openPar = 0
1392 1404
1393 1405 for token in iterTokens:
1394 1406 if token == ')':
1395 1407 openPar -= 1
1396 1408 elif token == '(':
1397 1409 openPar += 1
1398 1410 if openPar > 0:
1399 1411 # found the last unclosed parenthesis
1400 1412 break
1401 1413 else:
1402 1414 return []
1403 1415 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
1404 1416 ids = []
1405 1417 isId = re.compile(r'\w+$').match
1406 1418
1407 1419 while True:
1408 1420 try:
1409 1421 ids.append(next(iterTokens))
1410 1422 if not isId(ids[-1]):
1411 1423 ids.pop(); break
1412 1424 if not next(iterTokens) == '.':
1413 1425 break
1414 1426 except StopIteration:
1415 1427 break
1416 1428
1417 1429 # Find all named arguments already assigned to, as to avoid suggesting
1418 1430 # them again
1419 1431 usedNamedArgs = set()
1420 1432 par_level = -1
1421 1433 for token, next_token in zip(tokens, tokens[1:]):
1422 1434 if token == '(':
1423 1435 par_level += 1
1424 1436 elif token == ')':
1425 1437 par_level -= 1
1426 1438
1427 1439 if par_level != 0:
1428 1440 continue
1429 1441
1430 1442 if next_token != '=':
1431 1443 continue
1432 1444
1433 1445 usedNamedArgs.add(token)
1434 1446
1435 1447 # lookup the candidate callable matches either using global_matches
1436 1448 # or attr_matches for dotted names
1437 1449 if len(ids) == 1:
1438 1450 callableMatches = self.global_matches(ids[0])
1439 1451 else:
1440 1452 callableMatches = self.attr_matches('.'.join(ids[::-1]))
1441 1453 argMatches = []
1442 1454 for callableMatch in callableMatches:
1443 1455 try:
1444 1456 namedArgs = self._default_arguments(eval(callableMatch,
1445 1457 self.namespace))
1446 1458 except:
1447 1459 continue
1448 1460
1449 1461 # Remove used named arguments from the list, no need to show twice
1450 1462 for namedArg in set(namedArgs) - usedNamedArgs:
1451 1463 if namedArg.startswith(text):
1452 1464 argMatches.append(u"%s=" %namedArg)
1453 1465 return argMatches
1454 1466
1455 1467 def dict_key_matches(self, text):
1456 1468 "Match string keys in a dictionary, after e.g. 'foo[' "
1457 1469 def get_keys(obj):
1458 1470 # Objects can define their own completions by defining an
1459 1471 # _ipy_key_completions_() method.
1460 1472 method = get_real_method(obj, '_ipython_key_completions_')
1461 1473 if method is not None:
1462 1474 return method()
1463 1475
1464 1476 # Special case some common in-memory dict-like types
1465 1477 if isinstance(obj, dict) or\
1466 1478 _safe_isinstance(obj, 'pandas', 'DataFrame'):
1467 1479 try:
1468 1480 return list(obj.keys())
1469 1481 except Exception:
1470 1482 return []
1471 1483 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
1472 1484 _safe_isinstance(obj, 'numpy', 'void'):
1473 1485 return obj.dtype.names or []
1474 1486 return []
1475 1487
1476 1488 try:
1477 1489 regexps = self.__dict_key_regexps
1478 1490 except AttributeError:
1479 1491 dict_key_re_fmt = r'''(?x)
1480 1492 ( # match dict-referring expression wrt greedy setting
1481 1493 %s
1482 1494 )
1483 1495 \[ # open bracket
1484 1496 \s* # and optional whitespace
1485 1497 ([uUbB]? # string prefix (r not handled)
1486 1498 (?: # unclosed string
1487 1499 '(?:[^']|(?<!\\)\\')*
1488 1500 |
1489 1501 "(?:[^"]|(?<!\\)\\")*
1490 1502 )
1491 1503 )?
1492 1504 $
1493 1505 '''
1494 1506 regexps = self.__dict_key_regexps = {
1495 1507 False: re.compile(dict_key_re_fmt % '''
1496 1508 # identifiers separated by .
1497 1509 (?!\d)\w+
1498 1510 (?:\.(?!\d)\w+)*
1499 1511 '''),
1500 1512 True: re.compile(dict_key_re_fmt % '''
1501 1513 .+
1502 1514 ''')
1503 1515 }
1504 1516
1505 1517 match = regexps[self.greedy].search(self.text_until_cursor)
1506 1518 if match is None:
1507 1519 return []
1508 1520
1509 1521 expr, prefix = match.groups()
1510 1522 try:
1511 1523 obj = eval(expr, self.namespace)
1512 1524 except Exception:
1513 1525 try:
1514 1526 obj = eval(expr, self.global_namespace)
1515 1527 except Exception:
1516 1528 return []
1517 1529
1518 1530 keys = get_keys(obj)
1519 1531 if not keys:
1520 1532 return keys
1521 1533 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
1522 1534 if not matches:
1523 1535 return matches
1524 1536
1525 1537 # get the cursor position of
1526 1538 # - the text being completed
1527 1539 # - the start of the key text
1528 1540 # - the start of the completion
1529 1541 text_start = len(self.text_until_cursor) - len(text)
1530 1542 if prefix:
1531 1543 key_start = match.start(2)
1532 1544 completion_start = key_start + token_offset
1533 1545 else:
1534 1546 key_start = completion_start = match.end()
1535 1547
1536 1548 # grab the leading prefix, to make sure all completions start with `text`
1537 1549 if text_start > key_start:
1538 1550 leading = ''
1539 1551 else:
1540 1552 leading = text[text_start:completion_start]
1541 1553
1542 1554 # the index of the `[` character
1543 1555 bracket_idx = match.end(1)
1544 1556
1545 1557 # append closing quote and bracket as appropriate
1546 1558 # this is *not* appropriate if the opening quote or bracket is outside
1547 1559 # the text given to this method
1548 1560 suf = ''
1549 1561 continuation = self.line_buffer[len(self.text_until_cursor):]
1550 1562 if key_start > text_start and closing_quote:
1551 1563 # quotes were opened inside text, maybe close them
1552 1564 if continuation.startswith(closing_quote):
1553 1565 continuation = continuation[len(closing_quote):]
1554 1566 else:
1555 1567 suf += closing_quote
1556 1568 if bracket_idx > text_start:
1557 1569 # brackets were opened inside text, maybe close them
1558 1570 if not continuation.startswith(']'):
1559 1571 suf += ']'
1560 1572
1561 1573 return [leading + k + suf for k in matches]
1562 1574
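A minimal usage sketch of the dict-key completion above, exercised through the public ``complete`` API; the shell handle ``ip`` and the dictionary ``d`` are illustrative, and the expected matches mirror the dict-key tests further down:

    from IPython import get_ipython
    ip = get_ipython()
    ip.user_ns['d'] = {'abc': None, 'abd': None}
    _, matches = ip.Completer.complete(line_buffer="d['a")
    # both 'abc' and 'abd' should appear in ``matches``, with no closing quote or bracket
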
1563 1575 def unicode_name_matches(self, text):
1564 1576 u"""Match Latex-like syntax for unicode characters base
1565 1577 on the name of the character.
1566 1578
1567 1579 This does ``\\GREEK SMALL LETTER ETA`` -> ``Ξ·``
1568 1580
1569 1581 Works only on valid Python 3 identifiers, or on combining characters that
1570 1582 will combine to form a valid identifier.
1571 1583
1572 1584 Used on Python 3 only.
1573 1585 """
1574 1586 slashpos = text.rfind('\\')
1575 1587 if slashpos > -1:
1576 1588 s = text[slashpos+1:]
1577 1589 try :
1578 1590 unic = unicodedata.lookup(s)
1579 1591 # allow combining chars
1580 1592 if ('a'+unic).isidentifier():
1581 1593 return '\\'+s,[unic]
1582 1594 except KeyError:
1583 1595 pass
1584 1596 return u'', []
1585 1597
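A small sketch of the forward unicode-name completion implemented above, using the example from the docstring (an interactive session with ``ip = get_ipython()`` is assumed):

    from IPython import get_ipython
    ip = get_ipython()
    text, matches = ip.Completer.unicode_name_matches('\\GREEK SMALL LETTER ETA')
    # expected: text == '\\GREEK SMALL LETTER ETA' and matches == ['Ξ·']
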
1586 1598
1587 1599 def latex_matches(self, text):
1588 1600 u"""Match Latex syntax for unicode characters.
1589 1601
1590 1602 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``Ξ±``
1591 1603
1592 1604 Used on Python 3 only.
1593 1605 """
1594 1606 slashpos = text.rfind('\\')
1595 1607 if slashpos > -1:
1596 1608 s = text[slashpos:]
1597 1609 if s in latex_symbols:
1598 1610 # Try to complete a full latex symbol to unicode
1599 1611 # \\alpha -> Ξ±
1600 1612 return s, [latex_symbols[s]]
1601 1613 else:
1602 1614 # If a user has partially typed a latex symbol, give them
1603 1615 # a full list of options \al -> [\aleph, \alpha]
1604 1616 matches = [k for k in latex_symbols if k.startswith(s)]
1605 1617 return s, matches
1606 1618 return u'', []
1607 1619
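A sketch of the two behaviours described in the docstring, consistent with the latex completion tests below (``ip = get_ipython()`` assumed):

    from IPython import get_ipython
    ip = get_ipython()
    text, matches = ip.Completer.latex_matches('\\alpha')   # full latex symbol -> unicode
    # expected: ('\\alpha', ['Ξ±'])
    text, matches = ip.Completer.latex_matches('\\al')      # partial symbol -> candidate list
    # expected: matches contains '\\aleph' and '\\alpha'
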
1608 1620 def dispatch_custom_completer(self, text):
1609 1621 if not self.custom_completers:
1610 1622 return
1611 1623
1612 1624 line = self.line_buffer
1613 1625 if not line.strip():
1614 1626 return None
1615 1627
1616 1628 # Create a little structure to pass all the relevant information about
1617 1629 # the current completion to any custom completer.
1618 1630 event = SimpleNamespace()
1619 1631 event.line = line
1620 1632 event.symbol = text
1621 1633 cmd = line.split(None,1)[0]
1622 1634 event.command = cmd
1623 1635 event.text_until_cursor = self.text_until_cursor
1624 1636
1625 1637 # for foo etc, try also to find completer for %foo
1626 1638 if not cmd.startswith(self.magic_escape):
1627 1639 try_magic = self.custom_completers.s_matches(
1628 1640 self.magic_escape + cmd)
1629 1641 else:
1630 1642 try_magic = []
1631 1643
1632 1644 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1633 1645 try_magic,
1634 1646 self.custom_completers.flat_matches(self.text_until_cursor)):
1635 1647 try:
1636 1648 res = c(event)
1637 1649 if res:
1638 1650 # first, try case sensitive match
1639 1651 withcase = [r for r in res if r.startswith(text)]
1640 1652 if withcase:
1641 1653 return withcase
1642 1654 # if none, then case insensitive ones are ok too
1643 1655 text_low = text.lower()
1644 1656 return [r for r in res if r.lower().startswith(text_low)]
1645 1657 except TryNext:
1646 1658 pass
1647 1659
1648 1660 return None
1649 1661
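Custom completers reached by this dispatcher are typically registered through the ``complete_command`` hook; a hedged sketch follows, in which the command name ``%mycmd`` and the returned strings are purely illustrative:

    from IPython import get_ipython

    def mycmd_completer(self, event):
        # ``event`` carries .line, .symbol, .command and .text_until_cursor,
        # as constructed in dispatch_custom_completer above
        return ['install', 'remove', 'upgrade']

    ip = get_ipython()
    ip.set_hook('complete_command', mycmd_completer, str_key='%mycmd')
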
1650 1662 def completions(self, text: str, offset: int)->Iterator[Completion]:
1651 1663 """
1652 1664 Returns an iterator over the possible completions
1653 1665
1654 1666 .. warning:: Unstable
1655 1667
1656 1668 This function is unstable; the API may change without warning.
1657 1669 It will also raise unless used in the proper context manager.
1658 1670
1659 1671 Parameters
1660 1672 ----------
1661 1673
1662 1674 text : str
1663 1675 Full text of the current input, a multiline string.
1664 1676 offset : int
1665 1677 Integer representing the position of the cursor in ``text``. Offset
1666 1678 is 0-based indexed.
1667 1679
1668 1680 Yields
1669 1681 ------
1670 1682 :any:`Completion` object
1671 1683
1672 1684
1673 1685 The cursor in a text can either be seen as being "in between"
1674 1686 characters or "on" a character, depending on the interface visible to
1675 1687 the user. For consistency, the cursor being "in between" characters X
1676 1688 and Y is equivalent to the cursor being "on" character Y; that is to say,
1677 1689 the character the cursor is on is considered as being after the cursor.
1678 1690
1679 1691 Combining characters may span more than one position in the
1680 1692 text.
1681 1693
1682 1694
1683 1695 .. note::
1684 1696
1685 1697 If ``IPCompleter.debug`` is :any:`True`, this will yield a ``--jedi/ipython--``
1686 1698 fake Completion token to distinguish completion returned by Jedi
1687 1699 and usual IPython completion.
1688 1700
1689 1701 .. note::
1690 1702
1691 1703 Completions are not completely deduplicated yet. If identical
1692 1704 completions are coming from different sources this function does not
1693 1705 ensure that each completion object will only be present once.
1694 1706 """
1695 1707 warnings.warn("_complete is a provisional API (as of IPython 6.0). "
1696 1708 "It may change without warnings. "
1697 1709 "Use in corresponding context manager.",
1698 1710 category=ProvisionalCompleterWarning, stacklevel=2)
1699 1711
1700 1712 seen = set()
1701 1713 for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
1702 1714 if c and (c in seen):
1703 1715 continue
1704 1716 yield c
1705 1717 seen.add(c)
1706 1718
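A hedged sketch of driving this provisional API, mirroring its use in the test file below; it must run inside the :any:`provisionalcompleter` context manager, and the input string is illustrative:

    from IPython import get_ipython
    from IPython.core.completer import provisionalcompleter

    ip = get_ipython()
    with provisionalcompleter():
        comps = list(ip.Completer.completions('myvar.bi', 8))
    # each item is a Completion carrying .start, .end and .text
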
1707 1719 def _completions(self, full_text: str, offset: int, *, _timeout)->Iterator[Completion]:
1708 1720 """
1709 1721 Core completion module. Same signature as :any:`completions`, with the
1710 1722 extra `timeout` parameter (in seconds).
1711 1723
1712 1724
1713 1725 Computing jedi's completion ``.type`` can be quite expensive (it is a
1714 1726 lazy property) and can require some warm-up, more warm-up than just
1715 1727 computing the ``name`` of a completion. The warm-up can be:
1716 1728
1717 1729 - Long warm-up the first time a module is encountered after
1718 1730 install/update: actually building the parse/inference tree.
1719 1731
1720 1732 - First time the module is encountered in a session: loading the tree
1721 1733 from disk.
1722 1734
1723 1735 We don't want to block completions for tens of seconds, so we give the
1724 1736 completer a "budget" of ``_timeout`` seconds per invocation to compute
1725 1737 completion types; the completions that have not yet been computed will
1726 1738 be marked as "unknown" and will have a chance to be computed next round
1727 1739 as things get cached.
1728 1740
1729 1741 Keep in mind that Jedi is not the only thing processing the completions, so
1730 1742 keep the timeout short-ish: if we take more than 0.3 seconds we still
1731 1743 have lots of processing to do.
1732 1744
1733 1745 """
1734 1746 deadline = time.monotonic() + _timeout
1735 1747
1736 1748
1737 1749 before = full_text[:offset]
1738 1750 cursor_line, cursor_column = position_to_cursor(full_text, offset)
1739 1751
1740 1752 matched_text, matches, matches_origin, jedi_matches = self._complete(
1741 1753 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column)
1742 1754
1743 1755 iter_jm = iter(jedi_matches)
1744 1756 if _timeout:
1745 1757 for jm in iter_jm:
1746 1758 try:
1747 1759 type_ = jm.type
1748 1760 except Exception:
1749 1761 if self.debug:
1750 1762 print("Error in Jedi getting type of ", jm)
1751 1763 type_ = None
1752 1764 delta = len(jm.name_with_symbols) - len(jm.complete)
1753 1765 yield Completion(start=offset - delta,
1754 1766 end=offset,
1755 1767 text=jm.name_with_symbols,
1756 1768 type=type_,
1757 1769 _origin='jedi')
1758 1770
1759 1771 if time.monotonic() > deadline:
1760 1772 break
1761 1773
1762 1774 for jm in iter_jm:
1763 1775 delta = len(jm.name_with_symbols) - len(jm.complete)
1764 1776 yield Completion(start=offset - delta,
1765 1777 end=offset,
1766 1778 text=jm.name_with_symbols,
1767 1779 type='<unknown>', # don't compute type for speed
1768 1780 _origin='jedi')
1769 1781
1770 1782
1771 1783 start_offset = before.rfind(matched_text)
1772 1784
1773 1785 # TODO:
1774 1786 # Suppress this, right now just for debug.
1775 1787 if jedi_matches and matches and self.debug:
1776 1788 yield Completion(start=start_offset, end=offset, text='--jedi/ipython--', _origin='debug')
1777 1789
1778 1790 # I'm unsure if this is always true, so let's assert and see if it
1779 1791 # crashes
1780 1792 assert before.endswith(matched_text)
1781 1793 for m, t in zip(matches, matches_origin):
1782 1794 yield Completion(start=start_offset, end=offset, text=m, _origin=t)
1783 1795
1784 1796
1785 1797 def complete(self, text=None, line_buffer=None, cursor_pos=None):
1786 1798 """Find completions for the given text and line context.
1787 1799
1788 1800 Note that both the text and the line_buffer are optional, but at least
1789 1801 one of them must be given.
1790 1802
1791 1803 Parameters
1792 1804 ----------
1793 1805 text : string, optional
1794 1806 Text to perform the completion on. If not given, the line buffer
1795 1807 is split using the instance's CompletionSplitter object.
1796 1808
1797 1809 line_buffer : string, optional
1798 1810 If not given, the completer attempts to obtain the current line
1799 1811 buffer via readline. This keyword allows clients which are
1800 1812 requesting text completions in non-readline contexts to inform
1801 1813 the completer of the entire text.
1802 1814
1803 1815 cursor_pos : int, optional
1804 1816 Index of the cursor in the full line buffer. Should be provided by
1805 1817 remote frontends where kernel has no access to frontend state.
1806 1818
1807 1819 Returns
1808 1820 -------
1809 1821 text : str
1810 1822 Text that was actually used in the completion.
1811 1823
1812 1824 matches : list
1813 1825 A list of completion matches.
1814 1826
1815 1827
1816 1828 .. note::
1817 1829
1818 1830 This API is likely to be deprecated and replaced by
1819 1831 :any:`IPCompleter.completions` in the future.
1820 1832
1821 1833
1822 1834 """
1823 1835 warnings.warn('`Completer.complete` is pending deprecation since '
1824 1836 'IPython 6.0 and will be replaced by `Completer.completions`.',
1825 1837 PendingDeprecationWarning)
1826 1838 # potential todo: FOLD the 3rd, thrown-away argument of _complete
1827 1839 # into the first two.
1828 1840 return self._complete(line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0)[:2]
1829 1841
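Typical use of this (pending-deprecation) tuple-returning API, as exercised throughout the test file below; the line buffer is illustrative:

    from IPython import get_ipython
    ip = get_ipython()
    text, matches = ip.Completer.complete(line_buffer='import i', cursor_pos=8)
    # ``text`` is the token being completed, ``matches`` the candidate strings
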
1830 1842 def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
1831 1843 full_text=None, return_jedi_results=True) -> Tuple[str, List[str], List[str], Iterable[_FakeJediCompletion]]:
1832 1844 """
1833 1845
1834 1846 Like complete but can also return raw jedi completions as well as the
1835 1847 origin of the completion text. This could (and should) be made much
1836 1848 cleaner but that will be simpler once we drop the old (and stateful)
1837 1849 :any:`complete` API.
1838 1850
1839 1851
1840 1852 With the current provisional API, cursor_pos acts both (depending on the
1841 1853 caller) as the offset in the ``text`` or ``line_buffer``, or as the
1842 1854 ``column`` when passing multiline strings; this could/should be renamed
1843 1855 but would add extra noise.
1844 1856 """
1845 1857
1846 1858 # if the cursor position isn't given, the only sane assumption we can
1847 1859 # make is that it's at the end of the line (the common case)
1848 1860 if cursor_pos is None:
1849 1861 cursor_pos = len(line_buffer) if text is None else len(text)
1850 1862
1851 1863 if self.use_main_ns:
1852 1864 self.namespace = __main__.__dict__
1853 1865
1854 1866 # if text is either None or an empty string, rely on the line buffer
1855 1867 if (not line_buffer) and full_text:
1856 1868 line_buffer = full_text.split('\n')[cursor_line]
1857 1869 if not text:
1858 1870 text = self.splitter.split_line(line_buffer, cursor_pos)
1859 1871
1860 1872 if self.backslash_combining_completions:
1861 1873 # allow deactivation of these on windows.
1862 1874 base_text = text if not line_buffer else line_buffer[:cursor_pos]
1863 1875 latex_text, latex_matches = self.latex_matches(base_text)
1864 1876 if latex_matches:
1865 1877 return latex_text, latex_matches, ['latex_matches']*len(latex_matches), ()
1866 1878 name_text = ''
1867 1879 name_matches = []
1868 1880 for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
1869 1881 name_text, name_matches = meth(base_text)
1870 1882 if name_text:
1871 1883 return name_text, name_matches, [meth.__qualname__]*len(name_matches), ()
1872 1884
1873 1885
1874 1886 # If no line buffer is given, assume the input text is all there was
1875 1887 if line_buffer is None:
1876 1888 line_buffer = text
1877 1889
1878 1890 self.line_buffer = line_buffer
1879 1891 self.text_until_cursor = self.line_buffer[:cursor_pos]
1880 1892
1893 # Do magic arg matches
1894 for matcher in self.magic_arg_matchers:
1895 matches = [(m, matcher.__qualname__) for m in matcher(line_buffer)]
1896 if matches:
1897 matches2 = [m[0] for m in matches]
1898 origins = [m[1] for m in matches]
1899 return text, matches2, origins, {}
1900
1881 1901 # Start with a clean slate of completions
1882 1902 matches = []
1883 1903 custom_res = self.dispatch_custom_completer(text)
1884 1904 # FIXME: we should extend our api to return a dict with completions for
1885 1905 # different types of objects. The rlcomplete() method could then
1886 1906 # simply collapse the dict into a list for readline, but we'd have
1887 1907 # richer completion semantics in other environments.
1888 1908 completions = ()
1889 1909 if self.use_jedi and return_jedi_results:
1890 1910 if not full_text:
1891 1911 full_text = line_buffer
1892 1912 completions = self._jedi_matches(
1893 1913 cursor_pos, cursor_line, full_text)
1894 1914 if custom_res is not None:
1895 1915 # did custom completers produce something?
1896 1916 matches = [(m, 'custom') for m in custom_res]
1897 1917 else:
1898 1918 # Extend the list of completions with the results of each
1899 1919 # matcher, so we return results to the user from all
1900 1920 # namespaces.
1901 1921 if self.merge_completions:
1902 1922 matches = []
1903 1923 for matcher in self.matchers:
1904 1924 try:
1905 1925 matches.extend([(m, matcher.__qualname__)
1906 1926 for m in matcher(text)])
1907 1927 except:
1908 1928 # Show the ugly traceback if the matcher causes an
1909 1929 # exception, but do NOT crash the kernel!
1910 1930 sys.excepthook(*sys.exc_info())
1911 1931 else:
1912 1932 for matcher in self.matchers:
1913 1933 matches = [(m, matcher.__qualname__)
1914 1934 for m in matcher(text)]
1915 1935 if matches:
1916 1936 break
1917 1937 seen = set()
1918 1938 filtered_matches = set()
1919 1939 for m in matches:
1920 1940 t, c = m
1921 1941 if t not in seen:
1922 1942 filtered_matches.add(m)
1923 1943 seen.add(t)
1924 1944
1925 1945 _filtered_matches = sorted(
1926 1946 set(filtered_matches), key=lambda x: completions_sorting_key(x[0]))
1927 1947
1928 1948 _matches = [m[0] for m in _filtered_matches]
1929 1949 origins = [m[1] for m in _filtered_matches]
1930 1950
1931 1951 self.matches = _matches
1932 1952
1933 1953 return text, _matches, origins, completions
@@ -1,930 +1,949 b''
1 1 # encoding: utf-8
2 2 """Tests for the IPython tab-completion machinery."""
3 3
4 4 # Copyright (c) IPython Development Team.
5 5 # Distributed under the terms of the Modified BSD License.
6 6
7 7 import os
8 8 import sys
9 9 import textwrap
10 10 import unittest
11 11
12 12 from contextlib import contextmanager
13 13
14 14 import nose.tools as nt
15 15
16 16 from traitlets.config.loader import Config
17 17 from IPython import get_ipython
18 18 from IPython.core import completer
19 19 from IPython.external.decorators import knownfailureif
20 20 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
21 21 from IPython.utils.generics import complete_object
22 22 from IPython.testing import decorators as dec
23 23
24 24 from IPython.core.completer import (
25 25 Completion, provisionalcompleter, match_dict_keys, _deduplicate_completions)
26 26 from nose.tools import assert_in, assert_not_in
27 27
28 28 #-----------------------------------------------------------------------------
29 29 # Test functions
30 30 #-----------------------------------------------------------------------------
31 31
32 32 @contextmanager
33 33 def greedy_completion():
34 34 ip = get_ipython()
35 35 greedy_original = ip.Completer.greedy
36 36 try:
37 37 ip.Completer.greedy = True
38 38 yield
39 39 finally:
40 40 ip.Completer.greedy = greedy_original
41 41
42 42 def test_protect_filename():
43 43 if sys.platform == 'win32':
44 44 pairs = [('abc','abc'),
45 45 (' abc','" abc"'),
46 46 ('a bc','"a bc"'),
47 47 ('a bc','"a bc"'),
48 48 (' bc','" bc"'),
49 49 ]
50 50 else:
51 51 pairs = [('abc','abc'),
52 52 (' abc',r'\ abc'),
53 53 ('a bc',r'a\ bc'),
54 54 ('a bc',r'a\ \ bc'),
55 55 (' bc',r'\ \ bc'),
56 56 # On posix, we also protect parens and other special characters.
57 57 ('a(bc',r'a\(bc'),
58 58 ('a)bc',r'a\)bc'),
59 59 ('a( )bc',r'a\(\ \)bc'),
60 60 ('a[1]bc', r'a\[1\]bc'),
61 61 ('a{1}bc', r'a\{1\}bc'),
62 62 ('a#bc', r'a\#bc'),
63 63 ('a?bc', r'a\?bc'),
64 64 ('a=bc', r'a\=bc'),
65 65 ('a\\bc', r'a\\bc'),
66 66 ('a|bc', r'a\|bc'),
67 67 ('a;bc', r'a\;bc'),
68 68 ('a:bc', r'a\:bc'),
69 69 ("a'bc", r"a\'bc"),
70 70 ('a*bc', r'a\*bc'),
71 71 ('a"bc', r'a\"bc'),
72 72 ('a^bc', r'a\^bc'),
73 73 ('a&bc', r'a\&bc'),
74 74 ]
75 75 # run the actual tests
76 76 for s1, s2 in pairs:
77 77 s1p = completer.protect_filename(s1)
78 78 nt.assert_equal(s1p, s2)
79 79
80 80
81 81 def check_line_split(splitter, test_specs):
82 82 for part1, part2, split in test_specs:
83 83 cursor_pos = len(part1)
84 84 line = part1+part2
85 85 out = splitter.split_line(line, cursor_pos)
86 86 nt.assert_equal(out, split)
87 87
88 88
89 89 def test_line_split():
90 90 """Basic line splitter test with default specs."""
91 91 sp = completer.CompletionSplitter()
92 92 # The format of the test specs is: part1, part2, expected answer. Parts 1
93 93 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
94 94 # was at the end of part1. So an empty part2 represents someone hitting
95 95 # tab at the end of the line, the most common case.
96 96 t = [('run some/scrip', '', 'some/scrip'),
97 97 ('run scripts/er', 'ror.py foo', 'scripts/er'),
98 98 ('echo $HOM', '', 'HOM'),
99 99 ('print sys.pa', '', 'sys.pa'),
100 100 ('print(sys.pa', '', 'sys.pa'),
101 101 ("execfile('scripts/er", '', 'scripts/er'),
102 102 ('a[x.', '', 'x.'),
103 103 ('a[x.', 'y', 'x.'),
104 104 ('cd "some_file/', '', 'some_file/'),
105 105 ]
106 106 check_line_split(sp, t)
107 107 # Ensure splitting works OK with unicode by re-running the tests with
108 108 # all inputs turned into unicode
109 109 check_line_split(sp, [ map(str, p) for p in t] )
110 110
111 111
112 112 def test_custom_completion_error():
113 113 """Test that errors from custom attribute completers are silenced."""
114 114 ip = get_ipython()
115 115 class A(object): pass
116 116 ip.user_ns['a'] = A()
117 117
118 118 @complete_object.when_type(A)
119 119 def complete_A(a, existing_completions):
120 120 raise TypeError("this should be silenced")
121 121
122 122 ip.complete("a.")
123 123
124 124
125 125 def test_unicode_completions():
126 126 ip = get_ipython()
127 127 # Some strings that trigger different types of completion. Check them both
128 128 # in str and unicode forms
129 129 s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
130 130 for t in s + list(map(str, s)):
131 131 # We don't need to check exact completion values (they may change
132 132 # depending on the state of the namespace), but at least no exceptions
133 133 # should be thrown and the return value should be a pair of text, list
134 134 # values.
135 135 text, matches = ip.complete(t)
136 136 nt.assert_true(isinstance(text, str))
137 137 nt.assert_true(isinstance(matches, list))
138 138
139 139 def test_latex_completions():
140 140 from IPython.core.latex_symbols import latex_symbols
141 141 import random
142 142 ip = get_ipython()
143 143 # Test some random unicode symbols
144 144 keys = random.sample(latex_symbols.keys(), 10)
145 145 for k in keys:
146 146 text, matches = ip.complete(k)
147 147 nt.assert_equal(len(matches),1)
148 148 nt.assert_equal(text, k)
149 149 nt.assert_equal(matches[0], latex_symbols[k])
150 150 # Test a more complex line
151 151 text, matches = ip.complete(u'print(\\alpha')
152 152 nt.assert_equal(text, u'\\alpha')
153 153 nt.assert_equal(matches[0], latex_symbols['\\alpha'])
154 154 # Test multiple matching latex symbols
155 155 text, matches = ip.complete(u'\\al')
156 156 nt.assert_in('\\alpha', matches)
157 157 nt.assert_in('\\aleph', matches)
158 158
159 159
160 160
161 161
162 162 def test_back_latex_completion():
163 163 ip = get_ipython()
164 164
165 165 # do not return more than 1 match for \beta, only the latex one.
166 166 name, matches = ip.complete('\\Ξ²')
167 167 nt.assert_equal(len(matches), 1)
168 168 nt.assert_equal(matches[0], '\\beta')
169 169
170 170 def test_back_unicode_completion():
171 171 ip = get_ipython()
172 172
173 173 name, matches = ip.complete('\\β…€')
174 174 nt.assert_equal(len(matches), 1)
175 175 nt.assert_equal(matches[0], '\\ROMAN NUMERAL FIVE')
176 176
177 177
178 178 def test_forward_unicode_completion():
179 179 ip = get_ipython()
180 180
181 181 name, matches = ip.complete('\\ROMAN NUMERAL FIVE')
182 182 nt.assert_equal(len(matches), 1)
183 183 nt.assert_equal(matches[0], 'β…€')
184 184
185 185 @dec.knownfailureif(sys.platform == 'win32', 'Fails if there is a C:\\j... path')
186 186 def test_no_ascii_back_completion():
187 187 ip = get_ipython()
188 188 with TemporaryWorkingDirectory(): # Avoid any filename completions
189 189 # single ascii letters that don't yet have completions
190 190 for letter in 'jJ' :
191 191 name, matches = ip.complete('\\'+letter)
192 192 nt.assert_equal(matches, [])
193 193
194 194
195 195
196 196
197 197 class CompletionSplitterTestCase(unittest.TestCase):
198 198 def setUp(self):
199 199 self.sp = completer.CompletionSplitter()
200 200
201 201 def test_delim_setting(self):
202 202 self.sp.delims = ' '
203 203 nt.assert_equal(self.sp.delims, ' ')
204 204 nt.assert_equal(self.sp._delim_expr, '[\ ]')
205 205
206 206 def test_spaces(self):
207 207 """Test with only spaces as split chars."""
208 208 self.sp.delims = ' '
209 209 t = [('foo', '', 'foo'),
210 210 ('run foo', '', 'foo'),
211 211 ('run foo', 'bar', 'foo'),
212 212 ]
213 213 check_line_split(self.sp, t)
214 214
215 215
216 216 def test_has_open_quotes1():
217 217 for s in ["'", "'''", "'hi' '"]:
218 218 nt.assert_equal(completer.has_open_quotes(s), "'")
219 219
220 220
221 221 def test_has_open_quotes2():
222 222 for s in ['"', '"""', '"hi" "']:
223 223 nt.assert_equal(completer.has_open_quotes(s), '"')
224 224
225 225
226 226 def test_has_open_quotes3():
227 227 for s in ["''", "''' '''", "'hi' 'ipython'"]:
228 228 nt.assert_false(completer.has_open_quotes(s))
229 229
230 230
231 231 def test_has_open_quotes4():
232 232 for s in ['""', '""" """', '"hi" "ipython"']:
233 233 nt.assert_false(completer.has_open_quotes(s))
234 234
235 235
236 236 @knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
237 237 def test_abspath_file_completions():
238 238 ip = get_ipython()
239 239 with TemporaryDirectory() as tmpdir:
240 240 prefix = os.path.join(tmpdir, 'foo')
241 241 suffixes = ['1', '2']
242 242 names = [prefix+s for s in suffixes]
243 243 for n in names:
244 244 open(n, 'w').close()
245 245
246 246 # Check simple completion
247 247 c = ip.complete(prefix)[1]
248 248 nt.assert_equal(c, names)
249 249
250 250 # Now check with a function call
251 251 cmd = 'a = f("%s' % prefix
252 252 c = ip.complete(prefix, cmd)[1]
253 253 comp = [prefix+s for s in suffixes]
254 254 nt.assert_equal(c, comp)
255 255
256 256
257 257 def test_local_file_completions():
258 258 ip = get_ipython()
259 259 with TemporaryWorkingDirectory():
260 260 prefix = './foo'
261 261 suffixes = ['1', '2']
262 262 names = [prefix+s for s in suffixes]
263 263 for n in names:
264 264 open(n, 'w').close()
265 265
266 266 # Check simple completion
267 267 c = ip.complete(prefix)[1]
268 268 nt.assert_equal(c, names)
269 269
270 270 # Now check with a function call
271 271 cmd = 'a = f("%s' % prefix
272 272 c = ip.complete(prefix, cmd)[1]
273 273 comp = set(prefix+s for s in suffixes)
274 274 nt.assert_true(comp.issubset(set(c)))
275 275
276 276
277 277 def test_quoted_file_completions():
278 278 ip = get_ipython()
279 279 with TemporaryWorkingDirectory():
280 280 name = "foo'bar"
281 281 open(name, 'w').close()
282 282
283 283 # Don't escape Windows
284 284 escaped = name if sys.platform == "win32" else "foo\\'bar"
285 285
286 286 # Single quote matches embedded single quote
287 287 text = "open('foo"
288 288 c = ip.Completer._complete(cursor_line=0,
289 289 cursor_pos=len(text),
290 290 full_text=text)[1]
291 291 nt.assert_equal(c, [escaped])
292 292
293 293 # Double quote requires no escape
294 294 text = 'open("foo'
295 295 c = ip.Completer._complete(cursor_line=0,
296 296 cursor_pos=len(text),
297 297 full_text=text)[1]
298 298 nt.assert_equal(c, [name])
299 299
300 300 # No quote requires an escape
301 301 text = '%ls foo'
302 302 c = ip.Completer._complete(cursor_line=0,
303 303 cursor_pos=len(text),
304 304 full_text=text)[1]
305 305 nt.assert_equal(c, [escaped])
306 306
307 307
308 308 def test_jedi():
309 309 """
310 310 A couple of issues we had with Jedi
311 311 """
312 312 ip = get_ipython()
313 313
314 314 def _test_complete(reason, s, comp, start=None, end=None):
315 315 l = len(s)
316 316 start = start if start is not None else l
317 317 end = end if end is not None else l
318 318 with provisionalcompleter():
319 319 completions = set(ip.Completer.completions(s, l))
320 320 assert_in(Completion(start, end, comp), completions, reason)
321 321
322 322 def _test_not_complete(reason, s, comp):
323 323 l = len(s)
324 324 with provisionalcompleter():
325 325 completions = set(ip.Completer.completions(s, l))
326 326 assert_not_in(Completion(l, l, comp), completions, reason)
327 327
328 328 import jedi
329 329 jedi_version = tuple(int(i) for i in jedi.__version__.split('.')[:3])
330 330 if jedi_version > (0, 10):
331 331 yield _test_complete, 'jedi >0.9 should complete and not crash', 'a=1;a.', 'real'
332 332 yield _test_complete, 'can infer first argument', 'a=(1,"foo");a[0].', 'real'
333 333 yield _test_complete, 'can infer second argument', 'a=(1,"foo");a[1].', 'capitalize'
334 334 yield _test_complete, 'cover duplicate completions', 'im', 'import', 0, 2
335 335
336 336 yield _test_not_complete, 'does not mix types', 'a=(1,"foo");a[0].', 'capitalize'
337 337
338 338 def test_deduplicate_completions():
339 339 """
340 340 Test that completions are correctly deduplicated (even if ranges are not the same)
341 341 """
342 342 ip = get_ipython()
343 343 ip.ex(textwrap.dedent('''
344 344 class Z:
345 345 zoo = 1
346 346 '''))
347 347 with provisionalcompleter():
348 348 l = list(_deduplicate_completions('Z.z', ip.Completer.completions('Z.z', 3)))
349 349
350 350 assert len(l) == 1, 'Completions (Z.z<tab>) correctly deduplicate: %s ' % l
351 351 assert l[0].text == 'zoo' # and not `it.accumulate`
352 352
353 353
354 354 def test_greedy_completions():
355 355 """
356 356 Test the capability of the Greedy completer.
357 357
358 358 Most of the tests here do not really show off the greedy completer; as proof,
359 359 each of the texts below now passes with Jedi. The greedy completer is capable of more.
360 360
361 361 See the :any:`test_dict_key_completion_contexts`
362 362
363 363 """
364 364 ip = get_ipython()
365 365 ip.ex('a=list(range(5))')
366 366 _,c = ip.complete('.',line='a[0].')
367 367 nt.assert_false('.real' in c,
368 368 "Shouldn't have completed on a[0]: %s"%c)
369 369 with greedy_completion(), provisionalcompleter():
370 370 def _(line, cursor_pos, expect, message, completion):
371 371 _,c = ip.complete('.', line=line, cursor_pos=cursor_pos)
372 372 with provisionalcompleter():
373 373 completions = ip.Completer.completions(line, cursor_pos)
374 374 nt.assert_in(expect, c, message%c)
375 375 nt.assert_in(completion, completions)
376 376
377 377 yield _, 'a[0].', 5, 'a[0].real', "Should have completed on a[0].: %s", Completion(5,5, 'real')
378 378 yield _, 'a[0].r', 6, 'a[0].real', "Should have completed on a[0].r: %s", Completion(5,6, 'real')
379 379
380 380 if sys.version_info > (3, 4):
381 381 yield _, 'a[0].from_', 10, 'a[0].from_bytes', "Should have completed on a[0].from_: %s", Completion(5, 10, 'from_bytes')
382 382
383 383
384 384 def test_omit__names():
385 385 # also happens to test IPCompleter as a configurable
386 386 ip = get_ipython()
387 387 ip._hidden_attr = 1
388 388 ip._x = {}
389 389 c = ip.Completer
390 390 ip.ex('ip=get_ipython()')
391 391 cfg = Config()
392 392 cfg.IPCompleter.omit__names = 0
393 393 c.update_config(cfg)
394 394 with provisionalcompleter():
395 395 s,matches = c.complete('ip.')
396 396 completions = set(c.completions('ip.', 3))
397 397
398 398 nt.assert_in('ip.__str__', matches)
399 399 nt.assert_in(Completion(3, 3, '__str__'), completions)
400 400
401 401 nt.assert_in('ip._hidden_attr', matches)
402 402 nt.assert_in(Completion(3,3, "_hidden_attr"), completions)
403 403
404 404
405 405 cfg = Config()
406 406 cfg.IPCompleter.omit__names = 1
407 407 c.update_config(cfg)
408 408 with provisionalcompleter():
409 409 s,matches = c.complete('ip.')
410 410 completions = set(c.completions('ip.', 3))
411 411
412 412 nt.assert_not_in('ip.__str__', matches)
413 413 nt.assert_not_in(Completion(3,3,'__str__'), completions)
414 414
415 415 # nt.assert_in('ip._hidden_attr', matches)
416 416 nt.assert_in(Completion(3,3, "_hidden_attr"), completions)
417 417
418 418 cfg = Config()
419 419 cfg.IPCompleter.omit__names = 2
420 420 c.update_config(cfg)
421 421 with provisionalcompleter():
422 422 s,matches = c.complete('ip.')
423 423 completions = set(c.completions('ip.', 3))
424 424
425 425 nt.assert_not_in('ip.__str__', matches)
426 426 nt.assert_not_in(Completion(3,3,'__str__'), completions)
427 427
428 428 nt.assert_not_in('ip._hidden_attr', matches)
429 429 nt.assert_not_in(Completion(3,3, "_hidden_attr"), completions)
430 430
431 431 with provisionalcompleter():
432 432 s,matches = c.complete('ip._x.')
433 433 completions = set(c.completions('ip._x.', 6))
434 434
435 435 nt.assert_in('ip._x.keys', matches)
436 436 nt.assert_in(Completion(6,6, "keys"), completions)
437 437
438 438 del ip._hidden_attr
439 439 del ip._x
440 440
441 441
442 442 def test_limit_to__all__False_ok():
443 443 """
444 444 Limit to all is deprecated, once we remove it this test can go away.
445 445 """
446 446 ip = get_ipython()
447 447 c = ip.Completer
448 448 ip.ex('class D: x=24')
449 449 ip.ex('d=D()')
450 450 cfg = Config()
451 451 cfg.IPCompleter.limit_to__all__ = False
452 452 c.update_config(cfg)
453 453 s, matches = c.complete('d.')
454 454 nt.assert_in('d.x', matches)
455 455
456 456
457 457 def test_get__all__entries_ok():
458 458 class A(object):
459 459 __all__ = ['x', 1]
460 460 words = completer.get__all__entries(A())
461 461 nt.assert_equal(words, ['x'])
462 462
463 463
464 464 def test_get__all__entries_no__all__ok():
465 465 class A(object):
466 466 pass
467 467 words = completer.get__all__entries(A())
468 468 nt.assert_equal(words, [])
469 469
470 470
471 471 def test_func_kw_completions():
472 472 ip = get_ipython()
473 473 c = ip.Completer
474 474 ip.ex('def myfunc(a=1,b=2): return a+b')
475 475 s, matches = c.complete(None, 'myfunc(1,b')
476 476 nt.assert_in('b=', matches)
477 477 # Simulate completing with cursor right after b (pos==10):
478 478 s, matches = c.complete(None, 'myfunc(1,b)', 10)
479 479 nt.assert_in('b=', matches)
480 480 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
481 481 nt.assert_in('b=', matches)
482 482 #builtin function
483 483 s, matches = c.complete(None, 'min(k, k')
484 484 nt.assert_in('key=', matches)
485 485
486 486
487 487 def test_default_arguments_from_docstring():
488 488 ip = get_ipython()
489 489 c = ip.Completer
490 490 kwd = c._default_arguments_from_docstring(
491 491 'min(iterable[, key=func]) -> value')
492 492 nt.assert_equal(kwd, ['key'])
493 493 #with cython type etc
494 494 kwd = c._default_arguments_from_docstring(
495 495 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
496 496 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
497 497 #white spaces
498 498 kwd = c._default_arguments_from_docstring(
499 499 '\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
500 500 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
501 501
502 502 def test_line_magics():
503 503 ip = get_ipython()
504 504 c = ip.Completer
505 505 s, matches = c.complete(None, 'lsmag')
506 506 nt.assert_in('%lsmagic', matches)
507 507 s, matches = c.complete(None, '%lsmag')
508 508 nt.assert_in('%lsmagic', matches)
509 509
510 510
511 511 def test_cell_magics():
512 512 from IPython.core.magic import register_cell_magic
513 513
514 514 @register_cell_magic
515 515 def _foo_cellm(line, cell):
516 516 pass
517 517
518 518 ip = get_ipython()
519 519 c = ip.Completer
520 520
521 521 s, matches = c.complete(None, '_foo_ce')
522 522 nt.assert_in('%%_foo_cellm', matches)
523 523 s, matches = c.complete(None, '%%_foo_ce')
524 524 nt.assert_in('%%_foo_cellm', matches)
525 525
526 526
527 527 def test_line_cell_magics():
528 528 from IPython.core.magic import register_line_cell_magic
529 529
530 530 @register_line_cell_magic
531 531 def _bar_cellm(line, cell):
532 532 pass
533 533
534 534 ip = get_ipython()
535 535 c = ip.Completer
536 536
537 537 # The policy here is trickier, see comments in completion code. The
538 538 # returned values depend on whether the user passes %% or not explicitly,
539 539 # and this will show a difference if the same name is both a line and cell
540 540 # magic.
541 541 s, matches = c.complete(None, '_bar_ce')
542 542 nt.assert_in('%_bar_cellm', matches)
543 543 nt.assert_in('%%_bar_cellm', matches)
544 544 s, matches = c.complete(None, '%_bar_ce')
545 545 nt.assert_in('%_bar_cellm', matches)
546 546 nt.assert_in('%%_bar_cellm', matches)
547 547 s, matches = c.complete(None, '%%_bar_ce')
548 548 nt.assert_not_in('%_bar_cellm', matches)
549 549 nt.assert_in('%%_bar_cellm', matches)
550 550
551 551
552 552 def test_magic_completion_order():
553 553 ip = get_ipython()
554 554 c = ip.Completer
555 555
556 556 # Test ordering of line and cell magics.
557 557 text, matches = c.complete("timeit")
558 558 nt.assert_equal(matches, ["%timeit", "%%timeit"])
559 559
560 560
561 561 def test_magic_completion_shadowing():
562 562 ip = get_ipython()
563 563 c = ip.Completer
564 564
565 565 # Before importing matplotlib, %matplotlib magic should be the only option.
566 566 text, matches = c.complete("mat")
567 567 nt.assert_equal(matches, ["%matplotlib"])
568 568
569 569 # The newly introduced name should shadow the magic.
570 570 ip.run_cell("matplotlib = 1")
571 571 text, matches = c.complete("mat")
572 572 nt.assert_equal(matches, ["matplotlib"])
573 573
574 574 # After removing matplotlib from namespace, the magic should again be
575 575 # the only option.
576 576 del ip.user_ns["matplotlib"]
577 577 text, matches = c.complete("mat")
578 578 nt.assert_equal(matches, ["%matplotlib"])
579 579
580 580
581
582 581 def test_magic_config():
583 582 ip = get_ipython()
584 583 c = ip.Completer
585 584
586 585 s, matches = c.complete(None, 'conf')
587 586 nt.assert_in('%config', matches)
587 s, matches = c.complete(None, 'conf')
588 nt.assert_not_in('AliasManager', matches)
588 589 s, matches = c.complete(None, 'config ')
589 590 nt.assert_in('AliasManager', matches)
590 591 s, matches = c.complete(None, '%config ')
591 592 nt.assert_in('AliasManager', matches)
592 593 s, matches = c.complete(None, 'config Ali')
593 nt.assert_in('AliasManager', matches)
594 nt.assert_list_equal(['AliasManager'], matches)
594 595 s, matches = c.complete(None, '%config Ali')
595 nt.assert_in('AliasManager', matches)
596 nt.assert_list_equal(['AliasManager'], matches)
596 597 s, matches = c.complete(None, 'config AliasManager')
597 598 nt.assert_list_equal(['AliasManager'], matches)
598 599 s, matches = c.complete(None, '%config AliasManager')
599 600 nt.assert_list_equal(['AliasManager'], matches)
600 601 s, matches = c.complete(None, 'config AliasManager.')
601 602 nt.assert_in('AliasManager.default_aliases', matches)
602 603 s, matches = c.complete(None, '%config AliasManager.')
603 604 nt.assert_in('AliasManager.default_aliases', matches)
604 605 s, matches = c.complete(None, 'config AliasManager.de')
605 nt.assert_in('AliasManager.default_aliases', matches)
606 nt.assert_list_equal(['AliasManager.default_aliases'], matches)
606 607 s, matches = c.complete(None, 'config AliasManager.de')
607 nt.assert_in('AliasManager.default_aliases', matches)
608 nt.assert_list_equal(['AliasManager.default_aliases'], matches)
609
610
611 def test_magic_color():
612 ip = get_ipython()
613 c = ip.Completer
614
615 s, matches = c.complete(None, 'colo')
616 nt.assert_in('%colors', matches)
617 s, matches = c.complete(None, 'colo')
618 nt.assert_not_in('NoColor', matches)
619 s, matches = c.complete(None, 'colors ')
620 nt.assert_in('NoColor', matches)
621 s, matches = c.complete(None, '%colors ')
622 nt.assert_in('NoColor', matches)
623 s, matches = c.complete(None, 'colors NoCo')
624 nt.assert_list_equal(['NoColor'], matches)
625 s, matches = c.complete(None, '%colors NoCo')
626 nt.assert_list_equal(['NoColor'], matches)
608 627
609 628
610 629 def test_match_dict_keys():
611 630 """
612 631 Test that match_dict_keys works on a couple of use cases, returns what is
613 632 expected, and does not crash
614 633 """
615 634 delims = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
616 635
617 636
618 637 keys = ['foo', b'far']
619 638 assert match_dict_keys(keys, "b'", delims=delims) == ("'", 2 ,['far'])
620 639 assert match_dict_keys(keys, "b'f", delims=delims) == ("'", 2 ,['far'])
621 640 assert match_dict_keys(keys, 'b"', delims=delims) == ('"', 2 ,['far'])
622 641 assert match_dict_keys(keys, 'b"f', delims=delims) == ('"', 2 ,['far'])
623 642
624 643 assert match_dict_keys(keys, "'", delims=delims) == ("'", 1 ,['foo'])
625 644 assert match_dict_keys(keys, "'f", delims=delims) == ("'", 1 ,['foo'])
626 645 assert match_dict_keys(keys, '"', delims=delims) == ('"', 1 ,['foo'])
627 646 assert match_dict_keys(keys, '"f', delims=delims) == ('"', 1 ,['foo'])
628 647
629 648 match_dict_keys
630 649
631 650
632 651 def test_dict_key_completion_string():
633 652 """Test dictionary key completion for string keys"""
634 653 ip = get_ipython()
635 654 complete = ip.Completer.complete
636 655
637 656 ip.user_ns['d'] = {'abc': None}
638 657
639 658 # check completion at different stages
640 659 _, matches = complete(line_buffer="d[")
641 660 nt.assert_in("'abc'", matches)
642 661 nt.assert_not_in("'abc']", matches)
643 662
644 663 _, matches = complete(line_buffer="d['")
645 664 nt.assert_in("abc", matches)
646 665 nt.assert_not_in("abc']", matches)
647 666
648 667 _, matches = complete(line_buffer="d['a")
649 668 nt.assert_in("abc", matches)
650 669 nt.assert_not_in("abc']", matches)
651 670
652 671 # check use of different quoting
653 672 _, matches = complete(line_buffer="d[\"")
654 673 nt.assert_in("abc", matches)
655 674 nt.assert_not_in('abc\"]', matches)
656 675
657 676 _, matches = complete(line_buffer="d[\"a")
658 677 nt.assert_in("abc", matches)
659 678 nt.assert_not_in('abc\"]', matches)
660 679
661 680 # check sensitivity to following context
662 681 _, matches = complete(line_buffer="d[]", cursor_pos=2)
663 682 nt.assert_in("'abc'", matches)
664 683
665 684 _, matches = complete(line_buffer="d['']", cursor_pos=3)
666 685 nt.assert_in("abc", matches)
667 686 nt.assert_not_in("abc'", matches)
668 687 nt.assert_not_in("abc']", matches)
669 688
670 689 # check multiple solutions are correctly returned and that noise is not
671 690 ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,
672 691 5: None}
673 692
674 693 _, matches = complete(line_buffer="d['a")
675 694 nt.assert_in("abc", matches)
676 695 nt.assert_in("abd", matches)
677 696 nt.assert_not_in("bad", matches)
678 697 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
679 698
680 699 # check escaping and whitespace
681 700 ip.user_ns['d'] = {'a\nb': None, 'a\'b': None, 'a"b': None, 'a word': None}
682 701 _, matches = complete(line_buffer="d['a")
683 702 nt.assert_in("a\\nb", matches)
684 703 nt.assert_in("a\\'b", matches)
685 704 nt.assert_in("a\"b", matches)
686 705 nt.assert_in("a word", matches)
687 706 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
688 707
689 708 # - can complete on non-initial word of the string
690 709 _, matches = complete(line_buffer="d['a w")
691 710 nt.assert_in("word", matches)
692 711
693 712 # - understands quote escaping
694 713 _, matches = complete(line_buffer="d['a\\'")
695 714 nt.assert_in("b", matches)
696 715
697 716 # - default quoting should work like repr
698 717 _, matches = complete(line_buffer="d[")
699 718 nt.assert_in("\"a'b\"", matches)
700 719
701 720 # - when opening quote with ", possible to match with unescaped apostrophe
702 721 _, matches = complete(line_buffer="d[\"a'")
703 722 nt.assert_in("b", matches)
704 723
705 724 # need to not split at delims that readline won't split at
706 725 if '-' not in ip.Completer.splitter.delims:
707 726 ip.user_ns['d'] = {'before-after': None}
708 727 _, matches = complete(line_buffer="d['before-af")
709 728 nt.assert_in('before-after', matches)
710 729
711 730 def test_dict_key_completion_contexts():
712 731 """Test expression contexts in which dict key completion occurs"""
713 732 ip = get_ipython()
714 733 complete = ip.Completer.complete
715 734 d = {'abc': None}
716 735 ip.user_ns['d'] = d
717 736
718 737 class C:
719 738 data = d
720 739 ip.user_ns['C'] = C
721 740 ip.user_ns['get'] = lambda: d
722 741
723 742 def assert_no_completion(**kwargs):
724 743 _, matches = complete(**kwargs)
725 744 nt.assert_not_in('abc', matches)
726 745 nt.assert_not_in('abc\'', matches)
727 746 nt.assert_not_in('abc\']', matches)
728 747 nt.assert_not_in('\'abc\'', matches)
729 748 nt.assert_not_in('\'abc\']', matches)
730 749
731 750 def assert_completion(**kwargs):
732 751 _, matches = complete(**kwargs)
733 752 nt.assert_in("'abc'", matches)
734 753 nt.assert_not_in("'abc']", matches)
735 754
736 755 # no completion after string closed, even if reopened
737 756 assert_no_completion(line_buffer="d['a'")
738 757 assert_no_completion(line_buffer="d[\"a\"")
739 758 assert_no_completion(line_buffer="d['a' + ")
740 759 assert_no_completion(line_buffer="d['a' + '")
741 760
742 761 # completion in non-trivial expressions
743 762 assert_completion(line_buffer="+ d[")
744 763 assert_completion(line_buffer="(d[")
745 764 assert_completion(line_buffer="C.data[")
746 765
747 766 # greedy flag
748 767 def assert_completion(**kwargs):
749 768 _, matches = complete(**kwargs)
750 769 nt.assert_in("get()['abc']", matches)
751 770
752 771 assert_no_completion(line_buffer="get()[")
753 772 with greedy_completion():
754 773 assert_completion(line_buffer="get()[")
755 774 assert_completion(line_buffer="get()['")
756 775 assert_completion(line_buffer="get()['a")
757 776 assert_completion(line_buffer="get()['ab")
758 777 assert_completion(line_buffer="get()['abc")
759 778
760 779
761 780
762 781 def test_dict_key_completion_bytes():
763 782 """Test handling of bytes in dict key completion"""
764 783 ip = get_ipython()
765 784 complete = ip.Completer.complete
766 785
767 786 ip.user_ns['d'] = {'abc': None, b'abd': None}
768 787
769 788 _, matches = complete(line_buffer="d[")
770 789 nt.assert_in("'abc'", matches)
771 790 nt.assert_in("b'abd'", matches)
772 791
773 792 if False: # not currently implemented
774 793 _, matches = complete(line_buffer="d[b")
775 794 nt.assert_in("b'abd'", matches)
776 795 nt.assert_not_in("b'abc'", matches)
777 796
778 797 _, matches = complete(line_buffer="d[b'")
779 798 nt.assert_in("abd", matches)
780 799 nt.assert_not_in("abc", matches)
781 800
782 801 _, matches = complete(line_buffer="d[B'")
783 802 nt.assert_in("abd", matches)
784 803 nt.assert_not_in("abc", matches)
785 804
786 805 _, matches = complete(line_buffer="d['")
787 806 nt.assert_in("abc", matches)
788 807 nt.assert_not_in("abd", matches)
789 808
790 809
791 810 def test_dict_key_completion_unicode_py3():
792 811 """Test handling of unicode in dict key completion"""
793 812 ip = get_ipython()
794 813 complete = ip.Completer.complete
795 814
796 815 ip.user_ns['d'] = {u'a\u05d0': None}
797 816
798 817 # query using escape
799 818 if sys.platform != 'win32':
800 819 # Known failure on Windows
801 820 _, matches = complete(line_buffer="d['a\\u05d0")
802 821 nt.assert_in("u05d0", matches) # tokenized after \\
803 822
804 823 # query using character
805 824 _, matches = complete(line_buffer="d['a\u05d0")
806 825 nt.assert_in(u"a\u05d0", matches)
807 826
808 827 with greedy_completion():
809 828 # query using escape
810 829 _, matches = complete(line_buffer="d['a\\u05d0")
811 830 nt.assert_in("d['a\\u05d0']", matches) # tokenized after \\
812 831
813 832 # query using character
814 833 _, matches = complete(line_buffer="d['a\u05d0")
815 834 nt.assert_in(u"d['a\u05d0']", matches)
816 835
817 836
818 837
819 838 @dec.skip_without('numpy')
820 839 def test_struct_array_key_completion():
821 840 """Test dict key completion applies to numpy struct arrays"""
822 841 import numpy
823 842 ip = get_ipython()
824 843 complete = ip.Completer.complete
825 844 ip.user_ns['d'] = numpy.array([], dtype=[('hello', 'f'), ('world', 'f')])
826 845 _, matches = complete(line_buffer="d['")
827 846 nt.assert_in("hello", matches)
828 847 nt.assert_in("world", matches)
829 848 # complete on the numpy struct itself
830 849 dt = numpy.dtype([('my_head', [('my_dt', '>u4'), ('my_df', '>u4')]),
831 850 ('my_data', '>f4', 5)])
832 851 x = numpy.zeros(2, dtype=dt)
833 852 ip.user_ns['d'] = x[1]
834 853 _, matches = complete(line_buffer="d['")
835 854 nt.assert_in("my_head", matches)
836 855 nt.assert_in("my_data", matches)
837 856 # complete on a nested level
838 857 with greedy_completion():
839 858 ip.user_ns['d'] = numpy.zeros(2, dtype=dt)
840 859 _, matches = complete(line_buffer="d[1]['my_head']['")
841 860 nt.assert_true(any(["my_dt" in m for m in matches]))
842 861 nt.assert_true(any(["my_df" in m for m in matches]))
843 862
844 863
845 864 @dec.skip_without('pandas')
846 865 def test_dataframe_key_completion():
847 866 """Test dict key completion applies to pandas DataFrames"""
848 867 import pandas
849 868 ip = get_ipython()
850 869 complete = ip.Completer.complete
851 870 ip.user_ns['d'] = pandas.DataFrame({'hello': [1], 'world': [2]})
852 871 _, matches = complete(line_buffer="d['")
853 872 nt.assert_in("hello", matches)
854 873 nt.assert_in("world", matches)
855 874
856 875
857 876 def test_dict_key_completion_invalids():
858 877 """Smoke test cases dict key completion can't handle"""
859 878 ip = get_ipython()
860 879 complete = ip.Completer.complete
861 880
862 881 ip.user_ns['no_getitem'] = None
863 882 ip.user_ns['no_keys'] = []
864 883 ip.user_ns['cant_call_keys'] = dict
865 884 ip.user_ns['empty'] = {}
866 885 ip.user_ns['d'] = {'abc': 5}
867 886
868 887 _, matches = complete(line_buffer="no_getitem['")
869 888 _, matches = complete(line_buffer="no_keys['")
870 889 _, matches = complete(line_buffer="cant_call_keys['")
871 890 _, matches = complete(line_buffer="empty['")
872 891 _, matches = complete(line_buffer="name_error['")
873 892 _, matches = complete(line_buffer="d['\\") # incomplete escape
874 893
875 894 class KeyCompletable(object):
876 895 def __init__(self, things=()):
877 896 self.things = things
878 897
879 898 def _ipython_key_completions_(self):
880 899 return list(self.things)
881 900
882 901 def test_object_key_completion():
883 902 ip = get_ipython()
884 903 ip.user_ns['key_completable'] = KeyCompletable(['qwerty', 'qwick'])
885 904
886 905 _, matches = ip.Completer.complete(line_buffer="key_completable['qw")
887 906 nt.assert_in('qwerty', matches)
888 907 nt.assert_in('qwick', matches)
889 908
890 909
891 910 def test_tryimport():
892 911 """
893 912 Test that try_import doesn't crash on a trailing dot, and imports the modules before it
894 913 """
895 914 from IPython.core.completerlib import try_import
896 915 assert(try_import("IPython."))
897 916
898 917
899 918 def test_aimport_module_completer():
900 919 ip = get_ipython()
901 920 _, matches = ip.complete('i', '%aimport i')
902 921 nt.assert_in('io', matches)
903 922 nt.assert_not_in('int', matches)
904 923
905 924 def test_nested_import_module_completer():
906 925 ip = get_ipython()
907 926 _, matches = ip.complete(None, 'import IPython.co', 17)
908 927 nt.assert_in('IPython.core', matches)
909 928 nt.assert_not_in('import IPython.core', matches)
910 929 nt.assert_not_in('IPython.display', matches)
911 930
912 931 def test_import_module_completer():
913 932 ip = get_ipython()
914 933 _, matches = ip.complete('i', 'import i')
915 934 nt.assert_in('io', matches)
916 935 nt.assert_not_in('int', matches)
917 936
918 937 def test_from_module_completer():
919 938 ip = get_ipython()
920 939 _, matches = ip.complete('B', 'from io import B', 16)
921 940 nt.assert_in('BytesIO', matches)
922 941 nt.assert_not_in('BaseException', matches)
923 942
924 943 def test_snake_case_completion():
925 944 ip = get_ipython()
926 945 ip.user_ns['some_three'] = 3
927 946 ip.user_ns['some_four'] = 4
928 947 _, matches = ip.complete("s_", "print(s_f")
929 948 nt.assert_in('some_three', matches)
930 949 nt.assert_in('some_four', matches) No newline at end of file