Typing '%' restricts autocompletion to magics
ygeyzel
@@ -1,2271 +1,2272 @@
1 1 """Completion for IPython.
2 2
3 3 This module started as a fork of the rlcompleter module in the Python standard
4 4 library. The original enhancements made to rlcompleter have been sent
5 5 upstream and were accepted as of Python 2.3.
6 6 
7 7 This module now supports a wide variety of completion mechanisms, both for
8 8 normal classic Python code and for IPython-specific syntax such as
9 9 magics.
10 10
11 11 Latex and Unicode completion
12 12 ============================
13 13
14 14 IPython and compatible frontends can not only complete your code, but can also
15 15 help you input a wide range of characters. In particular, we allow you to
16 16 insert a unicode character using the tab completion mechanism.
17 17
18 18 Forward latex/unicode completion
19 19 --------------------------------
20 20
21 21 Forward completion allows you to easily type a unicode character using its latex
22 22 name or its long unicode description. To do so, type a backslash followed by the
23 23 relevant name and press tab:
24 24
25 25
26 26 Using latex completion:
27 27
28 28 .. code::
29 29
30 30 \\alpha<tab>
31 31 Ξ±
32 32
33 33 or using unicode completion:
34 34
35 35
36 36 .. code::
37 37
38 38 \\GREEK SMALL LETTER ALPHA<tab>
39 39 Ξ±
40 40
41 41
42 42 Only valid Python identifiers will complete. Combining characters (like arrows or
43 43 dots) are also available; unlike in latex, they need to be put after their
44 44 counterpart, that is to say, `F\\\\vec<tab>` is correct, not `\\\\vec<tab>F`.
45 45
46 46 Some browsers are known to display combining characters incorrectly.
47 47
48 48 Backward latex completion
49 49 -------------------------
50 50
51 51 It is sometimes challenging to know how to type a character. If you are using
52 52 IPython, or any compatible frontend, you can prepend a backslash to the character
53 53 and press `<tab>` to expand it to its latex form.
54 54
55 55 .. code::
56 56
57 57 \\Ξ±<tab>
58 58 \\alpha
59 59
60 60
61 61 Both forward and backward completions can be deactivated by setting the
62 62 ``Completer.backslash_combining_completions`` option to ``False``.
63 63
64 64
65 65 Experimental
66 66 ============
67 67
68 68 Starting with IPython 6.0, this module can make use of the Jedi library to
69 69 generate completions, both by using static analysis of the code and by
70 70 dynamically inspecting multiple namespaces. Jedi is an autocompletion and
71 71 static-analysis library for Python. The APIs attached to this new mechanism
72 72 are unstable and will raise unless used in a :any:`provisionalcompleter` context manager.
73 73
74 74 You will find that the following are experimental:
75 75
76 76 - :any:`provisionalcompleter`
77 77 - :any:`IPCompleter.completions`
78 78 - :any:`Completion`
79 79 - :any:`rectify_completions`
80 80
81 81 .. note::
82 82
83 83 better name for :any:`rectify_completions` ?
84 84
85 85 We welcome any feedback on these new APIs, and we also encourage you to try this
86 86 module in debug mode (start IPython with ``--Completer.debug=True``) in order
87 87 to have extra logging information if :any:`jedi` is crashing, or if the current
88 88 IPython completer's pending deprecations are returning results not yet handled
89 89 by :any:`jedi`.
90 90
91 91 Using Jedi for tab completion allows snippets like the following to work without
92 92 having to execute any code:
93 93
94 94 >>> myvar = ['hello', 42]
95 95 ... myvar[1].bi<tab>
96 96
97 97 Tab completion will be able to infer that ``myvar[1]`` is a real number without
98 98 executing any code, unlike the previously available ``IPCompleter.greedy``
99 99 option.
100 100
101 101 Be sure to update :any:`jedi` to the latest stable version or to try the
102 102 current development version to get better completions.
103 103 """
104 104
105 105
106 106 # Copyright (c) IPython Development Team.
107 107 # Distributed under the terms of the Modified BSD License.
108 108 #
109 109 # Some of this code originated from rlcompleter in the Python standard library
110 110 # Copyright (C) 2001 Python Software Foundation, www.python.org
111 111
112 112
113 113 import builtins as builtin_mod
114 114 import glob
115 115 import inspect
116 116 import itertools
117 117 import keyword
118 118 import os
119 119 import re
120 120 import string
121 121 import sys
122 122 import time
123 123 import unicodedata
124 124 import uuid
125 125 import warnings
126 126 from contextlib import contextmanager
127 127 from importlib import import_module
128 128 from types import SimpleNamespace
129 129 from typing import Iterable, Iterator, List, Tuple, Union, Any, Sequence, Dict, NamedTuple, Pattern, Optional
130 130
131 131 from IPython.core.error import TryNext
132 132 from IPython.core.inputtransformer2 import ESC_MAGIC
133 133 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
134 134 from IPython.core.oinspect import InspectColors
135 135 from IPython.testing.skipdoctest import skip_doctest
136 136 from IPython.utils import generics
137 137 from IPython.utils.dir2 import dir2, get_real_method
138 138 from IPython.utils.path import ensure_dir_exists
139 139 from IPython.utils.process import arg_split
140 140 from traitlets import Bool, Enum, Int, List as ListTrait, Unicode, default, observe
141 141 from traitlets.config.configurable import Configurable
142 142
143 143 import __main__
144 144
145 145 # skip module doctests
146 146 __skip_doctest__ = True
147 147
148 148 try:
149 149 import jedi
150 150 jedi.settings.case_insensitive_completion = False
151 151 import jedi.api.helpers
152 152 import jedi.api.classes
153 153 JEDI_INSTALLED = True
154 154 except ImportError:
155 155 JEDI_INSTALLED = False
156 156 #-----------------------------------------------------------------------------
157 157 # Globals
158 158 #-----------------------------------------------------------------------------
159 159
160 160 # Ranges where we have most of the valid unicode names. We could be more finely
161 161 # grained but is it worth it for performance? While unicode has characters in the
162 162 # range 0..0x110000, we seem to have names for about 10% of those (131808 as I
163 163 # write this). With the ranges below we cover them all, with a density of ~67%;
164 164 # the biggest next gap we could consider only adds about 1% density, and there are
165 165 # 600 gaps that would need hard coding.
166 166 _UNICODE_RANGES = [(32, 0x3134b), (0xe0001, 0xe01f0)]
167 167
168 168 # Public API
169 169 __all__ = ['Completer','IPCompleter']
170 170
171 171 if sys.platform == 'win32':
172 172 PROTECTABLES = ' '
173 173 else:
174 174 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
175 175
176 176 # Protect against returning an enormous number of completions which the frontend
177 177 # may have trouble processing.
178 178 MATCHES_LIMIT = 500
179 179
180 180
181 181 class ProvisionalCompleterWarning(FutureWarning):
182 182 """
183 183 Exception raised by an experimental feature in this module.
184 184
185 185 Wrap code in :any:`provisionalcompleter` context manager if you
186 186 are certain you want to use an unstable feature.
187 187 """
188 188 pass
189 189
190 190 warnings.filterwarnings('error', category=ProvisionalCompleterWarning)
191 191
192 192
193 193 @skip_doctest
194 194 @contextmanager
195 195 def provisionalcompleter(action='ignore'):
196 196 """
197 197 This context manager has to be used in any place where unstable completer
198 198 behavior and API may be called.
199 199
200 200 >>> with provisionalcompleter():
201 201 ... completer.do_experimental_things() # works
202 202
203 203 >>> completer.do_experimental_things() # raises.
204 204
205 205 .. note::
206 206
207 207 Unstable
208 208
209 209 By using this context manager you agree that the API in use may change
210 210 without warning, and that you won't complain if it does.
211 211
212 212 You also understand that, if the API is not to your liking, you should report
213 213 a bug to explain your use case upstream.
214 214
215 215 We'll be happy to get your feedback, feature requests, and improvements on
216 216 any of the unstable APIs!
217 217 """
218 218 with warnings.catch_warnings():
219 219 warnings.filterwarnings(action, category=ProvisionalCompleterWarning)
220 220 yield
221 221
222 222
223 223 def has_open_quotes(s):
224 224 """Return whether a string has open quotes.
225 225
226 226 This simply counts whether the number of quote characters of either type in
227 227 the string is odd.
228 228
229 229 Returns
230 230 -------
231 231 If there is an open quote, the quote character is returned. Else, return
232 232 False.
233 233 """
234 234 # We check " first, then ', so complex cases with nested quotes will get
235 235 # the " to take precedence.
236 236 if s.count('"') % 2:
237 237 return '"'
238 238 elif s.count("'") % 2:
239 239 return "'"
240 240 else:
241 241 return False
242 242
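# Illustrative usage (editor's sketch, not part of the original module):
#
#     >>> has_open_quotes("print('hello")
#     "'"
#     >>> has_open_quotes('print("hello')
#     '"'
#     >>> has_open_quotes("print('hello')")
#     False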
243 243
244 244 def protect_filename(s, protectables=PROTECTABLES):
245 245 """Escape a string to protect certain characters."""
246 246 if set(s) & set(protectables):
247 247 if sys.platform == "win32":
248 248 return '"' + s + '"'
249 249 else:
250 250 return "".join(("\\" + c if c in protectables else c) for c in s)
251 251 else:
252 252 return s
253 253
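# Illustrative usage (editor's sketch): on POSIX platforms protectable characters
# are backslash-escaped, while on win32 the whole name would simply be quoted:
#
#     >>> protect_filename('My Documents/notes.txt')   # POSIX output
#     'My\\ Documents/notes.txt'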
254 254
255 255 def expand_user(path:str) -> Tuple[str, bool, str]:
256 256 """Expand ``~``-style usernames in strings.
257 257
258 258 This is similar to :func:`os.path.expanduser`, but it computes and returns
259 259 extra information that will be useful if the input was being used in
260 260 computing completions, and you wish to return the completions with the
261 261 original '~' instead of its expanded value.
262 262
263 263 Parameters
264 264 ----------
265 265 path : str
266 266 String to be expanded. If no ~ is present, the output is the same as the
267 267 input.
268 268
269 269 Returns
270 270 -------
271 271 newpath : str
272 272 Result of ~ expansion in the input path.
273 273 tilde_expand : bool
274 274 Whether any expansion was performed or not.
275 275 tilde_val : str
276 276 The value that ~ was replaced with.
277 277 """
278 278 # Default values
279 279 tilde_expand = False
280 280 tilde_val = ''
281 281 newpath = path
282 282
283 283 if path.startswith('~'):
284 284 tilde_expand = True
285 285 rest = len(path)-1
286 286 newpath = os.path.expanduser(path)
287 287 if rest:
288 288 tilde_val = newpath[:-rest]
289 289 else:
290 290 tilde_val = newpath
291 291
292 292 return newpath, tilde_expand, tilde_val
293 293
294 294
295 295 def compress_user(path:str, tilde_expand:bool, tilde_val:str) -> str:
296 296 """Does the opposite of expand_user, with its outputs.
297 297 """
298 298 if tilde_expand:
299 299 return path.replace(tilde_val, '~')
300 300 else:
301 301 return path
302 302
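# Illustrative round-trip (editor's sketch; the expanded home directory, e.g.
# '/home/user/ipython' with tilde_val '/home/user', depends on the current user):
#
#     >>> newpath, expanded, tilde_val = expand_user('~/ipython')
#     >>> compress_user(newpath, expanded, tilde_val)
#     '~/ipython'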
303 303
304 304 def completions_sorting_key(word):
305 305 """key for sorting completions
306 306
307 307 This does several things:
308 308
309 309 - Demote any completions starting with underscores to the end
310 310 - Insert any %magic and %%cellmagic completions in the alphabetical order
311 311 by their name
312 312 """
313 313 prio1, prio2 = 0, 0
314 314
315 315 if word.startswith('__'):
316 316 prio1 = 2
317 317 elif word.startswith('_'):
318 318 prio1 = 1
319 319
320 320 if word.endswith('='):
321 321 prio1 = -1
322 322
323 323 if word.startswith('%%'):
324 324 # If there's another % in there, this is something else, so leave it alone
325 325 if not "%" in word[2:]:
326 326 word = word[2:]
327 327 prio2 = 2
328 328 elif word.startswith('%'):
329 329 if not "%" in word[1:]:
330 330 word = word[1:]
331 331 prio2 = 1
332 332
333 333 return prio1, word, prio2
334 334
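# Illustrative keys (editor's sketch): sorting with this key puts keyword-argument
# completions (ending in '=') first, interleaves magics with plain names by their
# bare name, and pushes underscore/dunder names to the end:
#
#     >>> completions_sorting_key('n_jobs=')
#     (-1, 'n_jobs=', 0)
#     >>> completions_sorting_key('%%timeit')
#     (0, 'timeit', 2)
#     >>> completions_sorting_key('__init__')
#     (2, '__init__', 0)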
335 335
336 336 class _FakeJediCompletion:
337 337 """
338 338 This is a workaround to communicate to the UI that Jedi has crashed and to
339 339 report a bug. Will be used only if :any:`IPCompleter.debug` is set to true.
340 340
341 341 Added in IPython 6.0 so should likely be removed for 7.0
342 342
343 343 """
344 344
345 345 def __init__(self, name):
346 346
347 347 self.name = name
348 348 self.complete = name
349 349 self.type = 'crashed'
350 350 self.name_with_symbols = name
351 351 self.signature = ''
352 352 self._origin = 'fake'
353 353
354 354 def __repr__(self):
355 355 return '<Fake completion object jedi has crashed>'
356 356
357 357
358 358 class Completion:
359 359 """
360 360 Completion object used and returned by IPython completers.
361 361
362 362 .. warning::
363 363
364 364 Unstable
365 365
366 366 This class is unstable; its API may change without warning.
367 367 It will also raise unless used in the proper context manager.
368 368
369 369 This acts as a middle-ground :any:`Completion` object between the
370 370 :any:`jedi.api.classes.Completion` object and the Prompt Toolkit completion
371 371 object. While Jedi needs a lot of information about the evaluator and how the
372 372 code should be run/inspected, PromptToolkit (and other frontends) mostly
373 373 need user-facing information.
374 374 
375 375 - Which range should be replaced by what.
376 376 - Some metadata (like the completion type), or meta information to be displayed
377 377 to the user.
378 378 
379 379 For debugging purposes we can also store the origin of the completion (``jedi``,
380 380 ``IPython.python_matches``, ``IPython.magics_matches``...).
381 381 """
382 382
383 383 __slots__ = ['start', 'end', 'text', 'type', 'signature', '_origin']
384 384
385 385 def __init__(self, start: int, end: int, text: str, *, type: str=None, _origin='', signature='') -> None:
386 386 warnings.warn("``Completion`` is a provisional API (as of IPython 6.0). "
387 387 "It may change without warnings. "
388 388 "Use in corresponding context manager.",
389 389 category=ProvisionalCompleterWarning, stacklevel=2)
390 390
391 391 self.start = start
392 392 self.end = end
393 393 self.text = text
394 394 self.type = type
395 395 self.signature = signature
396 396 self._origin = _origin
397 397
398 398 def __repr__(self):
399 399 return '<Completion start=%s end=%s text=%r type=%r, signature=%r,>' % \
400 400 (self.start, self.end, self.text, self.type or '?', self.signature or '?')
401 401
402 402 def __eq__(self, other)->Bool:
403 403 """
404 404 Equality and hash do not hash the type (as some completers may not be
405 405 able to infer the type), but are used to (partially) de-duplicate
406 406 completions.
407 407 
408 408 Completely de-duplicating completions is a bit trickier than just
409 409 comparing, as it depends on the surrounding text, which Completions are not
410 410 aware of.
411 411 """
412 412 return self.start == other.start and \
413 413 self.end == other.end and \
414 414 self.text == other.text
415 415
416 416 def __hash__(self):
417 417 return hash((self.start, self.end, self.text))
418 418
419 419
420 420 _IC = Iterable[Completion]
421 421
422 422
423 423 def _deduplicate_completions(text: str, completions: _IC)-> _IC:
424 424 """
425 425 Deduplicate a set of completions.
426 426
427 427 .. warning::
428 428
429 429 Unstable
430 430
431 431 This function is unstable, API may change without warning.
432 432
433 433 Parameters
434 434 ----------
435 435 text : str
436 436 text that should be completed.
437 437 completions : Iterator[Completion]
438 438 iterator over the completions to deduplicate
439 439
440 440 Yields
441 441 ------
442 442 `Completions` objects
443 443 Completions coming from multiple sources may be different but end up having
444 444 the same effect when applied to ``text``. If this is the case, this will
445 445 consider the completions as equal and only emit the first one encountered.
446 446 This is not folded into `completions()` yet, for debugging purposes, and to
447 447 detect when the IPython completer returns things that Jedi does not, but it
448 448 should be at some point.
449 449 """
450 450 completions = list(completions)
451 451 if not completions:
452 452 return
453 453
454 454 new_start = min(c.start for c in completions)
455 455 new_end = max(c.end for c in completions)
456 456
457 457 seen = set()
458 458 for c in completions:
459 459 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
460 460 if new_text not in seen:
461 461 yield c
462 462 seen.add(new_text)
463 463
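# Illustrative behaviour (editor's sketch): completions that produce the same
# final text for ``text`` are collapsed into one; must run inside the
# provisional-API context manager:
#
#     >>> with provisionalcompleter():
#     ...     cs = [Completion(0, 2, 'isinstance'), Completion(0, 2, 'isinstance')]
#     ...     print(len(list(_deduplicate_completions('is', cs))))
#     1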
464 464
465 465 def rectify_completions(text: str, completions: _IC, *, _debug: bool = False) -> _IC:
466 466 """
467 467 Rectify a set of completions to all have the same ``start`` and ``end``
468 468
469 469 .. warning::
470 470
471 471 Unstable
472 472
473 473 This function is unstable, API may change without warning.
474 474 It will also raise unless used in the proper context manager.
475 475
476 476 Parameters
477 477 ----------
478 478 text : str
479 479 text that should be completed.
480 480 completions : Iterator[Completion]
481 481 iterator over the completions to rectify
482 482 _debug : bool
483 483 Log failed completion
484 484
485 485 Notes
486 486 -----
487 487 :any:`jedi.api.classes.Completion` s returned by Jedi may not have the same start and end, though
488 488 the Jupyter Protocol requires them to behave as if they do. This will readjust
489 489 the completions to have the same ``start`` and ``end`` by padding both
490 490 extremities with the surrounding text.
491 491 
492 492 During stabilisation this should support a ``_debug`` option to log which
493 493 completions are returned by the IPython completer and not found in Jedi, in
494 494 order to make upstream bug reports.
495 495 """
496 496 warnings.warn("`rectify_completions` is a provisional API (as of IPython 6.0). "
497 497 "It may change without warnings. "
498 498 "Use in corresponding context manager.",
499 499 category=ProvisionalCompleterWarning, stacklevel=2)
500 500
501 501 completions = list(completions)
502 502 if not completions:
503 503 return
504 504 starts = (c.start for c in completions)
505 505 ends = (c.end for c in completions)
506 506
507 507 new_start = min(starts)
508 508 new_end = max(ends)
509 509
510 510 seen_jedi = set()
511 511 seen_python_matches = set()
512 512 for c in completions:
513 513 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
514 514 if c._origin == 'jedi':
515 515 seen_jedi.add(new_text)
516 516 elif c._origin == 'IPCompleter.python_matches':
517 517 seen_python_matches.add(new_text)
518 518 yield Completion(new_start, new_end, new_text, type=c.type, _origin=c._origin, signature=c.signature)
519 519 diff = seen_python_matches.difference(seen_jedi)
520 520 if diff and _debug:
521 521 print('IPython.python matches have extras:', diff)
522 522
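# Illustrative behaviour (editor's sketch): completions covering different ranges
# of ``text`` are padded with the surrounding text so they all share the same
# ``start`` and ``end``:
#
#     >>> with provisionalcompleter():
#     ...     cs = [Completion(0, 3, 'parrot'), Completion(2, 3, 'rrot')]
#     ...     print([c.text for c in rectify_completions('par', cs)])
#     ['parrot', 'parrot']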
523 523
524 524 if sys.platform == 'win32':
525 525 DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
526 526 else:
527 527 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
528 528
529 529 GREEDY_DELIMS = ' =\r\n'
530 530
531 531
532 532 class CompletionSplitter(object):
533 533 """An object to split an input line in a manner similar to readline.
534 534
535 535 By having our own implementation, we can expose readline-like completion in
536 536 a uniform manner to all frontends. This object only needs to be given the
537 537 line of text to be split and the cursor position on said line, and it
538 538 returns the 'word' to be completed on at the cursor after splitting the
539 539 entire line.
540 540
541 541 What characters are used as splitting delimiters can be controlled by
542 542 setting the ``delims`` attribute (this is a property that internally
543 543 automatically builds the necessary regular expression)"""
544 544
545 545 # Private interface
546 546
547 547 # A string of delimiter characters. The default value makes sense for
548 548 # IPython's most typical usage patterns.
549 549 _delims = DELIMS
550 550
551 551 # The expression (a normal string) to be compiled into a regular expression
552 552 # for actual splitting. We store it as an attribute mostly for ease of
553 553 # debugging, since this type of code can be so tricky to debug.
554 554 _delim_expr = None
555 555
556 556 # The regular expression that does the actual splitting
557 557 _delim_re = None
558 558
559 559 def __init__(self, delims=None):
560 560 delims = CompletionSplitter._delims if delims is None else delims
561 561 self.delims = delims
562 562
563 563 @property
564 564 def delims(self):
565 565 """Return the string of delimiter characters."""
566 566 return self._delims
567 567
568 568 @delims.setter
569 569 def delims(self, delims):
570 570 """Set the delimiters for line splitting."""
571 571 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
572 572 self._delim_re = re.compile(expr)
573 573 self._delims = delims
574 574 self._delim_expr = expr
575 575
576 576 def split_line(self, line, cursor_pos=None):
577 577 """Split a line of text with a cursor at the given position.
578 578 """
579 579 l = line if cursor_pos is None else line[:cursor_pos]
580 580 return self._delim_re.split(l)[-1]
581 581
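# Illustrative behaviour (editor's sketch): the last delimiter-separated chunk
# before the cursor is the 'word' to complete; note that '.' is deliberately not
# a delimiter, so attribute access stays in one chunk:
#
#     >>> sp = CompletionSplitter()
#     >>> sp.split_line('a = function(obj.attr')
#     'obj.attr'
#     >>> sp.split_line('print(alpha + beta', cursor_pos=11)
#     'alpha'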
582 582
583 583
584 584 class Completer(Configurable):
585 585
586 586 greedy = Bool(False,
587 587 help="""Activate greedy completion
588 588 PENDING DEPRECATION. This is now mostly taken care of with Jedi.
589 589
590 590 This will enable completion on elements of lists, results of function calls, etc.,
591 591 but can be unsafe because the code is actually evaluated on TAB.
592 592 """
593 593 ).tag(config=True)
594 594
595 595 use_jedi = Bool(default_value=JEDI_INSTALLED,
596 596 help="Experimental: Use Jedi to generate autocompletions. "
597 597 "Default to True if jedi is installed.").tag(config=True)
598 598
599 599 jedi_compute_type_timeout = Int(default_value=400,
600 600 help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
601 601 Set to 0 to stop computing types. A non-zero value lower than 100ms may hurt
602 602 performance by preventing jedi from building its cache.
603 603 """).tag(config=True)
604 604
605 605 debug = Bool(default_value=False,
606 606 help='Enable debug for the Completer. Mostly prints extra '
607 607 'information for experimental jedi integration.')\
608 608 .tag(config=True)
609 609
610 610 backslash_combining_completions = Bool(True,
611 611 help="Enable unicode completions, e.g. \\alpha<tab> . "
612 612 "Includes completion of latex commands, unicode names, and expanding "
613 613 "unicode characters back to latex commands.").tag(config=True)
614 614
615 615 def __init__(self, namespace=None, global_namespace=None, **kwargs):
616 616 """Create a new completer for the command line.
617 617
618 618 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
619 619
620 620 If unspecified, the default namespace where completions are performed
621 621 is __main__ (technically, __main__.__dict__). Namespaces should be
622 622 given as dictionaries.
623 623
624 624 An optional second namespace can be given. This allows the completer
625 625 to handle cases where both the local and global scopes need to be
626 626 distinguished.
627 627 """
628 628
629 629 # Don't bind to namespace quite yet, but flag whether the user wants a
630 630 # specific namespace or to use __main__.__dict__. This will allow us
631 631 # to bind to __main__.__dict__ at completion time, not now.
632 632 if namespace is None:
633 633 self.use_main_ns = True
634 634 else:
635 635 self.use_main_ns = False
636 636 self.namespace = namespace
637 637
638 638 # The global namespace, if given, can be bound directly
639 639 if global_namespace is None:
640 640 self.global_namespace = {}
641 641 else:
642 642 self.global_namespace = global_namespace
643 643
644 644 self.custom_matchers = []
645 645
646 646 super(Completer, self).__init__(**kwargs)
647 647
648 648 def complete(self, text, state):
649 649 """Return the next possible completion for 'text'.
650 650
651 651 This is called successively with state == 0, 1, 2, ... until it
652 652 returns None. The completion should begin with 'text'.
653 653
654 654 """
655 655 if self.use_main_ns:
656 656 self.namespace = __main__.__dict__
657 657
658 658 if state == 0:
659 659 if "." in text:
660 660 self.matches = self.attr_matches(text)
661 661 else:
662 662 self.matches = self.global_matches(text)
663 663 try:
664 664 return self.matches[state]
665 665 except IndexError:
666 666 return None
667 667
668 668 def global_matches(self, text):
669 669 """Compute matches when text is a simple name.
670 670
671 671 Return a list of all keywords, built-in functions and names currently
672 672 defined in self.namespace or self.global_namespace that match.
673 673
674 674 """
675 675 matches = []
676 676 match_append = matches.append
677 677 n = len(text)
678 678 for lst in [keyword.kwlist,
679 679 builtin_mod.__dict__.keys(),
680 680 self.namespace.keys(),
681 681 self.global_namespace.keys()]:
682 682 for word in lst:
683 683 if word[:n] == text and word != "__builtins__":
684 684 match_append(word)
685 685
686 686 snake_case_re = re.compile(r"[^_]+(_[^_]+)+?\Z")
687 687 for lst in [self.namespace.keys(),
688 688 self.global_namespace.keys()]:
689 689 shortened = {"_".join([sub[0] for sub in word.split('_')]) : word
690 690 for word in lst if snake_case_re.match(word)}
691 691 for word in shortened.keys():
692 692 if word[:n] == text and word != "__builtins__":
693 693 match_append(shortened[word])
694 694 return matches
695 695
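# Illustrative behaviour (editor's sketch, hypothetical namespace): besides plain
# prefix matches, snake_case names can also be matched by the initials of their
# parts joined with underscores:
#
#     >>> c = Completer(namespace={'my_long_variable': 1})
#     >>> 'my_long_variable' in c.global_matches('m_l_v')
#     True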
696 696 def attr_matches(self, text):
697 697 """Compute matches when text contains a dot.
698 698
699 699 Assuming the text is of the form NAME.NAME....[NAME], and is
700 700 evaluatable in self.namespace or self.global_namespace, it will be
701 701 evaluated and its attributes (as revealed by dir()) are used as
702 702 possible completions. (For class instances, class members are
703 703 also considered.)
704 704
705 705 WARNING: this can still invoke arbitrary C code, if an object
706 706 with a __getattr__ hook is evaluated.
707 707
708 708 """
709 709
710 710 # Another option, seems to work great. Catches things like ''.<tab>
711 711 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
712 712
713 713 if m:
714 714 expr, attr = m.group(1, 3)
715 715 elif self.greedy:
716 716 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
717 717 if not m2:
718 718 return []
719 719 expr, attr = m2.group(1,2)
720 720 else:
721 721 return []
722 722
723 723 try:
724 724 obj = eval(expr, self.namespace)
725 725 except:
726 726 try:
727 727 obj = eval(expr, self.global_namespace)
728 728 except:
729 729 return []
730 730
731 731 if self.limit_to__all__ and hasattr(obj, '__all__'):
732 732 words = get__all__entries(obj)
733 733 else:
734 734 words = dir2(obj)
735 735
736 736 try:
737 737 words = generics.complete_object(obj, words)
738 738 except TryNext:
739 739 pass
740 740 except AssertionError:
741 741 raise
742 742 except Exception:
743 743 # Silence errors from completion function
744 744 #raise # dbg
745 745 pass
746 746 # Build match list to return
747 747 n = len(attr)
748 748 return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr ]
749 749
750 750
751 751 def get__all__entries(obj):
752 752 """returns the strings in the __all__ attribute"""
753 753 try:
754 754 words = getattr(obj, '__all__')
755 755 except:
756 756 return []
757 757
758 758 return [w for w in words if isinstance(w, str)]
759 759
760 760
761 761 def match_dict_keys(keys: List[Union[str, bytes, Tuple[Union[str, bytes]]]], prefix: str, delims: str,
762 762 extra_prefix: Optional[Tuple[str, bytes]]=None) -> Tuple[str, int, List[str]]:
763 763 """Used by dict_key_matches, matching the prefix to a list of keys
764 764
765 765 Parameters
766 766 ----------
767 767 keys
768 768 list of keys in dictionary currently being completed.
769 769 prefix
770 770 Part of the text already typed by the user. E.g. `mydict[b'fo`
771 771 delims
772 772 String of delimiters to consider when finding the current key.
773 773 extra_prefix : optional
774 774 Part of the text already typed in multi-key index cases. E.g. for
775 775 `mydict['foo', "bar", 'b`, this would be `('foo', 'bar')`.
776 776
777 777 Returns
778 778 -------
779 779 A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
780 780 ``quote`` being the quote that needs to be used to close the current string,
781 781 ``token_start`` the position where the replacement should start occurring,
782 782 and ``matched`` a list of replacement/completion strings.
783 783
784 784 """
785 785 prefix_tuple = extra_prefix if extra_prefix else ()
786 786 Nprefix = len(prefix_tuple)
787 787 def filter_prefix_tuple(key):
788 788 # Reject too short keys
789 789 if len(key) <= Nprefix:
790 790 return False
791 791 # Reject keys with non str/bytes in it
792 792 for k in key:
793 793 if not isinstance(k, (str, bytes)):
794 794 return False
795 795 # Reject keys that do not match the prefix
796 796 for k, pt in zip(key, prefix_tuple):
797 797 if k != pt:
798 798 return False
799 799 # All checks passed!
800 800 return True
801 801
802 802 filtered_keys:List[Union[str,bytes]] = []
803 803 def _add_to_filtered_keys(key):
804 804 if isinstance(key, (str, bytes)):
805 805 filtered_keys.append(key)
806 806
807 807 for k in keys:
808 808 if isinstance(k, tuple):
809 809 if filter_prefix_tuple(k):
810 810 _add_to_filtered_keys(k[Nprefix])
811 811 else:
812 812 _add_to_filtered_keys(k)
813 813
814 814 if not prefix:
815 815 return '', 0, [repr(k) for k in filtered_keys]
816 816 quote_match = re.search('["\']', prefix)
817 817 assert quote_match is not None # silence mypy
818 818 quote = quote_match.group()
819 819 try:
820 820 prefix_str = eval(prefix + quote, {})
821 821 except Exception:
822 822 return '', 0, []
823 823
824 824 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
825 825 token_match = re.search(pattern, prefix, re.UNICODE)
826 826 assert token_match is not None # silence mypy
827 827 token_start = token_match.start()
828 828 token_prefix = token_match.group()
829 829
830 830 matched:List[str] = []
831 831 for key in filtered_keys:
832 832 try:
833 833 if not key.startswith(prefix_str):
834 834 continue
835 835 except (AttributeError, TypeError, UnicodeError):
836 836 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
837 837 continue
838 838
839 839 # reformat remainder of key to begin with prefix
840 840 rem = key[len(prefix_str):]
841 841 # force repr wrapped in '
842 842 rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
843 843 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
844 844 if quote == '"':
845 845 # The entered prefix is quoted with ",
846 846 # but the match is quoted with '.
847 847 # A contained " hence needs escaping for comparison:
848 848 rem_repr = rem_repr.replace('"', '\\"')
849 849
850 850 # then reinsert prefix from start of token
851 851 matched.append('%s%s' % (token_prefix, rem_repr))
852 852 return quote, token_start, matched
853 853
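# Illustrative behaviour (editor's sketch): for a partially typed, quoted key the
# prefix is evaluated, matching keys are returned without their quotes, and the
# offset where the replacement should start is reported:
#
#     >>> match_dict_keys(['foobar', 'foofoo', 'baz'], "'foo", DELIMS)
#     ("'", 1, ['foobar', 'foofoo'])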
854 854
855 855 def cursor_to_position(text:str, line:int, column:int)->int:
856 856 """
857 857 Convert the (line,column) position of the cursor in text to an offset in a
858 858 string.
859 859
860 860 Parameters
861 861 ----------
862 862 text : str
863 863 The text in which to calculate the cursor offset
864 864 line : int
865 865 Line of the cursor; 0-indexed
866 866 column : int
867 867 Column of the cursor 0-indexed
868 868
869 869 Returns
870 870 -------
871 871 Position of the cursor in ``text``, 0-indexed.
872 872
873 873 See Also
874 874 --------
875 875 position_to_cursor : reciprocal of this function
876 876
877 877 """
878 878 lines = text.split('\n')
879 879 assert line <= len(lines), '{} <= {}'.format(str(line), str(len(lines)))
880 880
881 881 return sum(len(l) + 1 for l in lines[:line]) + column
882 882
883 883 def position_to_cursor(text:str, offset:int)->Tuple[int, int]:
884 884 """
885 885 Convert the position of the cursor in text (0-indexed) to a line
886 886 number (0-indexed) and a column number (0-indexed) pair.
887 887
888 888 Position should be a valid position in ``text``.
889 889
890 890 Parameters
891 891 ----------
892 892 text : str
893 893 The text in which to calculate the cursor offset
894 894 offset : int
895 895 Position of the cursor in ``text``, 0-indexed.
896 896
897 897 Returns
898 898 -------
899 899 (line, column) : (int, int)
900 900 Line of the cursor; 0-indexed, column of the cursor 0-indexed
901 901
902 902 See Also
903 903 --------
904 904 cursor_to_position : reciprocal of this function
905 905
906 906 """
907 907
908 908 assert 0 <= offset <= len(text) , "0 <= %s <= %s" % (offset , len(text))
909 909
910 910 before = text[:offset]
911 911 blines = before.split('\n') # note: splitlines() would trim a trailing \n
912 912 line = before.count('\n')
913 913 col = len(blines[-1])
914 914 return line, col
915 915
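# Illustrative round-trip (editor's sketch) between the two representations:
#
#     >>> text = 'ab\ncd\nef'
#     >>> cursor_to_position(text, 1, 1)
#     4
#     >>> position_to_cursor(text, 4)
#     (1, 1)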
916 916
917 917 def _safe_isinstance(obj, module, class_name):
918 918 """Checks if obj is an instance of module.class_name if loaded
919 919 """
920 920 return (module in sys.modules and
921 921 isinstance(obj, getattr(import_module(module), class_name)))
922 922
923 923 def back_unicode_name_matches(text:str) -> Tuple[str, Sequence[str]]:
924 924 """Match Unicode characters back to Unicode name
925 925
926 926 This does ``β˜ƒ`` -> ``\\snowman``
927 927
928 928 Note that snowman is not a valid python3 combining character but will be expanded.
929 929 It will, however, not be recombined into the snowman character by the completion machinery.
930 930 
931 931 This will also not back-complete standard sequences like \\n, \\b ...
932 932 
933 933 Returns
934 934 -------
935 935 
936 936 A tuple with two elements:
937 937 
938 938 - The Unicode character that was matched (preceded with a backslash), or an
939 939 empty string,
940 940 - a sequence (of length 1) with the name of the matched Unicode character,
941 941 preceded by a backslash, or empty if no match.
942 942
943 943 """
944 944 if len(text)<2:
945 945 return '', ()
946 946 maybe_slash = text[-2]
947 947 if maybe_slash != '\\':
948 948 return '', ()
949 949
950 950 char = text[-1]
951 951 # no expand on quote for completion in strings.
952 952 # nor backcomplete standard ascii keys
953 953 if char in string.ascii_letters or char in ('"',"'"):
954 954 return '', ()
955 955 try :
956 956 unic = unicodedata.name(char)
957 957 return '\\'+char,('\\'+unic,)
958 958 except KeyError:
959 959 pass
960 960 return '', ()
961 961
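# Illustrative behaviour (editor's sketch):
#
#     >>> back_unicode_name_matches('\\β˜ƒ')
#     ('\\β˜ƒ', ('\\SNOWMAN',))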
962 962 def back_latex_name_matches(text:str) -> Tuple[str, Sequence[str]] :
963 963 """Match latex characters back to unicode name
964 964
965 965 This does ``\\β„΅`` -> ``\\aleph``
966 966
967 967 """
968 968 if len(text)<2:
969 969 return '', ()
970 970 maybe_slash = text[-2]
971 971 if maybe_slash != '\\':
972 972 return '', ()
973 973
974 974
975 975 char = text[-1]
976 976 # no expand on quote for completion in strings.
977 977 # nor backcomplete standard ascii keys
978 978 if char in string.ascii_letters or char in ('"',"'"):
979 979 return '', ()
980 980 try :
981 981 latex = reverse_latex_symbol[char]
982 982 # '\\' replace the \ as well
983 983 return '\\'+char,[latex]
984 984 except KeyError:
985 985 pass
986 986 return '', ()
987 987
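# Illustrative behaviour (editor's sketch, assuming β„΅ is present in the latex
# symbol table as the docstring above indicates):
#
#     >>> back_latex_name_matches('\\β„΅')
#     ('\\β„΅', ['\\aleph'])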
988 988
989 989 def _formatparamchildren(parameter) -> str:
990 990 """
991 991 Get parameter name and value from Jedi Private API
992 992
993 993 Jedi does not expose a simple way to get `param=value` from its API.
994 994
995 995 Parameters
996 996 ----------
997 997 parameter
998 998 Jedi's function `Param`
999 999
1000 1000 Returns
1001 1001 -------
1002 1002 A string like 'a', 'b=1', '*args', '**kwargs'
1003 1003
1004 1004 """
1005 1005 description = parameter.description
1006 1006 if not description.startswith('param '):
1007 1007 raise ValueError('Jedi function parameter description has changed format. '
1008 1008 'Expected "param ...", found %r.' % description)
1009 1009 return description[6:]
1010 1010
1011 1011 def _make_signature(completion)-> str:
1012 1012 """
1013 1013 Make the signature from a jedi completion
1014 1014
1015 1015 Parameters
1016 1016 ----------
1017 1017 completion : jedi.Completion
1018 1018 object does not complete a function type
1019 1019
1020 1020 Returns
1021 1021 -------
1022 1022 a string consisting of the function signature, with the parentheses but
1023 1023 without the function name. Example:
1024 1024 `(a, *args, b=1, **kwargs)`
1025 1025
1026 1026 """
1027 1027
1028 1028 # it looks like this might work on jedi 0.17
1029 1029 if hasattr(completion, 'get_signatures'):
1030 1030 signatures = completion.get_signatures()
1031 1031 if not signatures:
1032 1032 return '(?)'
1033 1033
1034 1034 c0 = completion.get_signatures()[0]
1035 1035 return '('+c0.to_string().split('(', maxsplit=1)[1]
1036 1036
1037 1037 return '(%s)'% ', '.join([f for f in (_formatparamchildren(p) for signature in completion.get_signatures()
1038 1038 for p in signature.defined_names()) if f])
1039 1039
1040 1040
1041 1041 class _CompleteResult(NamedTuple):
1042 1042 matched_text : str
1043 1043 matches: Sequence[str]
1044 1044 matches_origin: Sequence[str]
1045 1045 jedi_matches: Any
1046 1046
1047 1047
1048 1048 class IPCompleter(Completer):
1049 1049 """Extension of the completer class with IPython-specific features"""
1050 1050
1051 1051 __dict_key_regexps: Optional[Dict[bool,Pattern]] = None
1052 1052
1053 1053 @observe('greedy')
1054 1054 def _greedy_changed(self, change):
1055 1055 """update the splitter and readline delims when greedy is changed"""
1056 1056 if change['new']:
1057 1057 self.splitter.delims = GREEDY_DELIMS
1058 1058 else:
1059 1059 self.splitter.delims = DELIMS
1060 1060
1061 1061 dict_keys_only = Bool(False,
1062 1062 help="""Whether to show dict key matches only""")
1063 1063
1064 1064 merge_completions = Bool(True,
1065 1065 help="""Whether to merge completion results into a single list
1066 1066
1067 1067 If False, only the completion results from the first non-empty
1068 1068 completer will be returned.
1069 1069 """
1070 1070 ).tag(config=True)
1071 1071 omit__names = Enum((0,1,2), default_value=2,
1072 1072 help="""Instruct the completer to omit private method names
1073 1073
1074 1074 Specifically, when completing on ``object.<tab>``.
1075 1075
1076 1076 When 2 [default]: all names that start with '_' will be excluded.
1077 1077
1078 1078 When 1: all 'magic' names (``__foo__``) will be excluded.
1079 1079
1080 1080 When 0: nothing will be excluded.
1081 1081 """
1082 1082 ).tag(config=True)
1083 1083 limit_to__all__ = Bool(False,
1084 1084 help="""
1085 1085 DEPRECATED as of version 5.0.
1086 1086
1087 1087 Instruct the completer to use __all__ for the completion
1088 1088
1089 1089 Specifically, when completing on ``object.<tab>``.
1090 1090
1091 1091 When True: only those names in obj.__all__ will be included.
1092 1092
1093 1093 When False [default]: the __all__ attribute is ignored
1094 1094 """,
1095 1095 ).tag(config=True)
1096 1096
1097 1097 profile_completions = Bool(
1098 1098 default_value=False,
1099 1099 help="If True, emit profiling data for completion subsystem using cProfile."
1100 1100 ).tag(config=True)
1101 1101
1102 1102 profiler_output_dir = Unicode(
1103 1103 default_value=".completion_profiles",
1104 1104 help="Template for path at which to output profile data for completions."
1105 1105 ).tag(config=True)
1106 1106
1107 1107 @observe('limit_to__all__')
1108 1108 def _limit_to_all_changed(self, change):
1109 1109 warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
1110 1110 'value has been deprecated since IPython 5.0, will be made to have '
1111 1111 'no effect and then removed in a future version of IPython.',
1112 1112 UserWarning)
1113 1113
1114 1114 def __init__(
1115 1115 self, shell=None, namespace=None, global_namespace=None, config=None, **kwargs
1116 1116 ):
1117 1117 """IPCompleter() -> completer
1118 1118
1119 1119 Return a completer object.
1120 1120
1121 1121 Parameters
1122 1122 ----------
1123 1123 shell
1124 1124 a pointer to the ipython shell itself. This is needed
1125 1125 because this completer knows about magic functions, and those can
1126 1126 only be accessed via the ipython instance.
1127 1127 namespace : dict, optional
1128 1128 an optional dict where completions are performed.
1129 1129 global_namespace : dict, optional
1130 1130 secondary optional dict for completions, to
1131 1131 handle cases (such as IPython embedded inside functions) where
1132 1132 both Python scopes are visible.
1133 1133 config : Config
1134 1134 traitlet's config object
1135 1135 **kwargs
1136 1136 passed to super class unmodified.
1137 1137 """
1138 1138
1139 1139 self.magic_escape = ESC_MAGIC
1140 1140 self.splitter = CompletionSplitter()
1141 1141
1142 1142 # _greedy_changed() depends on splitter and readline being defined:
1143 1143 super().__init__(
1144 1144 namespace=namespace,
1145 1145 global_namespace=global_namespace,
1146 1146 config=config,
1147 1147 **kwargs
1148 1148 )
1149 1149
1150 1150 # List where completion matches will be stored
1151 1151 self.matches = []
1152 1152 self.shell = shell
1153 1153 # Regexp to split filenames with spaces in them
1154 1154 self.space_name_re = re.compile(r'([^\\] )')
1155 1155 # Hold a local ref. to glob.glob for speed
1156 1156 self.glob = glob.glob
1157 1157
1158 1158 # Determine if we are running on 'dumb' terminals, like (X)Emacs
1159 1159 # buffers, to avoid completion problems.
1160 1160 term = os.environ.get('TERM','xterm')
1161 1161 self.dumb_terminal = term in ['dumb','emacs']
1162 1162
1163 1163 # Special handling of backslashes needed in win32 platforms
1164 1164 if sys.platform == "win32":
1165 1165 self.clean_glob = self._clean_glob_win32
1166 1166 else:
1167 1167 self.clean_glob = self._clean_glob
1168 1168
1169 1169 #regexp to parse docstring for function signature
1170 1170 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1171 1171 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1172 1172 #use this if positional argument name is also needed
1173 1173 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
1174 1174
1175 1175 self.magic_arg_matchers = [
1176 1176 self.magic_config_matches,
1177 1177 self.magic_color_matches,
1178 1178 ]
1179 1179
1180 1180 # This is set externally by InteractiveShell
1181 1181 self.custom_completers = None
1182 1182
1183 1183 # This is a list of names of unicode characters that can be completed
1184 1184 # into their corresponding unicode value. The list is large, so we
1185 1185 # lazily initialize it on first use. Consuming code should access this
1186 1186 # attribute through the `@unicode_names` property.
1187 1187 self._unicode_names = None
1188 1188
1189 1189 @property
1190 1190 def matchers(self) -> List[Any]:
1191 1191 """All active matcher routines for completion"""
1192 1192 if self.dict_keys_only:
1193 1193 return [self.dict_key_matches]
1194 1194
1195 1195 if self.use_jedi:
1196 1196 return [
1197 1197 *self.custom_matchers,
1198 1198 self.dict_key_matches,
1199 1199 self.file_matches,
1200 1200 self.magic_matches,
1201 1201 ]
1202 1202 else:
1203 1203 return [
1204 1204 *self.custom_matchers,
1205 1205 self.dict_key_matches,
1206 1206 self.python_matches,
1207 1207 self.file_matches,
1208 1208 self.magic_matches,
1209 1209 self.python_func_kw_matches,
1210 1210 ]
1211 1211
1212 1212 def all_completions(self, text:str) -> List[str]:
1213 1213 """
1214 1214 Wrapper around the completion methods for the benefit of emacs.
1215 1215 """
1216 1216 prefix = text.rpartition('.')[0]
1217 1217 with provisionalcompleter():
1218 1218 return ['.'.join([prefix, c.text]) if prefix and self.use_jedi else c.text
1219 1219 for c in self.completions(text, len(text))]
1220 1220
1221 1221 return self.complete(text)[1]
1222 1222
1223 1223 def _clean_glob(self, text:str):
1224 1224 return self.glob("%s*" % text)
1225 1225
1226 1226 def _clean_glob_win32(self, text:str):
1227 1227 return [f.replace("\\","/")
1228 1228 for f in self.glob("%s*" % text)]
1229 1229
1230 1230 def file_matches(self, text:str)->List[str]:
1231 1231 """Match filenames, expanding ~USER type strings.
1232 1232
1233 1233 Most of the seemingly convoluted logic in this completer is an
1234 1234 attempt to handle filenames with spaces in them. And yet it's not
1235 1235 quite perfect, because Python's readline doesn't expose all of the
1236 1236 GNU readline details needed for this to be done correctly.
1237 1237
1238 1238 For a filename with a space in it, the printed completions will be
1239 1239 only the parts after what's already been typed (instead of the
1240 1240 full completions, as is normally done). I don't think with the
1241 1241 current (as of Python 2.3) Python readline it's possible to do
1242 1242 better."""
1243 1243
1244 1244 # chars that require escaping with backslash - i.e. chars
1245 1245 # that readline treats incorrectly as delimiters, but we
1246 1246 # don't want to treat as delimiters in filename matching
1247 1247 # when escaped with backslash
1248 1248 if text.startswith('!'):
1249 1249 text = text[1:]
1250 1250 text_prefix = u'!'
1251 1251 else:
1252 1252 text_prefix = u''
1253 1253
1254 1254 text_until_cursor = self.text_until_cursor
1255 1255 # track strings with open quotes
1256 1256 open_quotes = has_open_quotes(text_until_cursor)
1257 1257
1258 1258 if '(' in text_until_cursor or '[' in text_until_cursor:
1259 1259 lsplit = text
1260 1260 else:
1261 1261 try:
1262 1262 # arg_split ~ shlex.split, but with unicode bugs fixed by us
1263 1263 lsplit = arg_split(text_until_cursor)[-1]
1264 1264 except ValueError:
1265 1265 # typically an unmatched ", or backslash without escaped char.
1266 1266 if open_quotes:
1267 1267 lsplit = text_until_cursor.split(open_quotes)[-1]
1268 1268 else:
1269 1269 return []
1270 1270 except IndexError:
1271 1271 # tab pressed on empty line
1272 1272 lsplit = ""
1273 1273
1274 1274 if not open_quotes and lsplit != protect_filename(lsplit):
1275 1275 # if protectables are found, do matching on the whole escaped name
1276 1276 has_protectables = True
1277 1277 text0,text = text,lsplit
1278 1278 else:
1279 1279 has_protectables = False
1280 1280 text = os.path.expanduser(text)
1281 1281
1282 1282 if text == "":
1283 1283 return [text_prefix + protect_filename(f) for f in self.glob("*")]
1284 1284
1285 1285 # Compute the matches from the filesystem
1286 1286 if sys.platform == 'win32':
1287 1287 m0 = self.clean_glob(text)
1288 1288 else:
1289 1289 m0 = self.clean_glob(text.replace('\\', ''))
1290 1290
1291 1291 if has_protectables:
1292 1292 # If we had protectables, we need to revert our changes to the
1293 1293 # beginning of filename so that we don't double-write the part
1294 1294 # of the filename we have so far
1295 1295 len_lsplit = len(lsplit)
1296 1296 matches = [text_prefix + text0 +
1297 1297 protect_filename(f[len_lsplit:]) for f in m0]
1298 1298 else:
1299 1299 if open_quotes:
1300 1300 # if we have a string with an open quote, we don't need to
1301 1301 # protect the names beyond the quote (and we _shouldn't_, as
1302 1302 # it would cause bugs when the filesystem call is made).
1303 1303 matches = m0 if sys.platform == "win32" else\
1304 1304 [protect_filename(f, open_quotes) for f in m0]
1305 1305 else:
1306 1306 matches = [text_prefix +
1307 1307 protect_filename(f) for f in m0]
1308 1308
1309 1309 # Mark directories in input list by appending '/' to their names.
1310 1310 return [x+'/' if os.path.isdir(x) else x for x in matches]
1311 1311
1312 1312 def magic_matches(self, text:str):
1313 1313 """Match magics"""
1314 1314 # Get all shell magics now rather than statically, so magics loaded at
1315 1315 # runtime show up too.
1316 1316 lsm = self.shell.magics_manager.lsmagic()
1317 1317 line_magics = lsm['line']
1318 1318 cell_magics = lsm['cell']
1319 1319 pre = self.magic_escape
1320 1320 pre2 = pre+pre
1321 1321
1322 1322 explicit_magic = text.startswith(pre)
1323 1323
1324 1324 # Completion logic:
1325 1325 # - user gives %%: only do cell magics
1326 1326 # - user gives %: do both line and cell magics
1327 1327 # - no prefix: do both
1328 1328 # In other words, line magics are skipped if the user gives %% explicitly
1329 1329 #
1330 1330 # We also exclude magics that match any currently visible names:
1331 1331 # https://github.com/ipython/ipython/issues/4877, unless the user has
1332 1332 # typed a %:
1333 1333 # https://github.com/ipython/ipython/issues/10754
1334 1334 bare_text = text.lstrip(pre)
1335 1335 global_matches = self.global_matches(bare_text)
1336 1336 if not explicit_magic:
1337 1337 def matches(magic):
1338 1338 """
1339 1339 Filter magics, in particular remove magics that match
1340 1340 a name present in global namespace.
1341 1341 """
1342 1342 return ( magic.startswith(bare_text) and
1343 1343 magic not in global_matches )
1344 1344 else:
1345 1345 def matches(magic):
1346 1346 return magic.startswith(bare_text)
1347 1347
1348 1348 comp = [ pre2+m for m in cell_magics if matches(m)]
1349 1349 if not text.startswith(pre2):
1350 1350 comp += [ pre+m for m in line_magics if matches(m)]
1351 1351
1352 1352 return comp
1353 1353
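# Illustrative behaviour (editor's sketch, assuming the default %time/%timeit
# magics are registered): completing on '%ti' offers both cell and line magics,
# e.g. ['%%time', '%%timeit', '%time', '%timeit'], while completing on '%%ti'
# restricts the result to cell magics only, e.g. ['%%time', '%%timeit'].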
1354 1354 def magic_config_matches(self, text:str) -> List[str]:
1355 1355 """ Match class names and attributes for %config magic """
1356 1356 texts = text.strip().split()
1357 1357
1358 1358 if len(texts) > 0 and (texts[0] == 'config' or texts[0] == '%config'):
1359 1359 # get all configuration classes
1360 1360 classes = sorted(set([ c for c in self.shell.configurables
1361 1361 if c.__class__.class_traits(config=True)
1362 1362 ]), key=lambda x: x.__class__.__name__)
1363 1363 classnames = [ c.__class__.__name__ for c in classes ]
1364 1364
1365 1365 # return all classnames if config or %config is given
1366 1366 if len(texts) == 1:
1367 1367 return classnames
1368 1368
1369 1369 # match classname
1370 1370 classname_texts = texts[1].split('.')
1371 1371 classname = classname_texts[0]
1372 1372 classname_matches = [ c for c in classnames
1373 1373 if c.startswith(classname) ]
1374 1374
1375 1375 # return matched classes or the matched class with attributes
1376 1376 if texts[1].find('.') < 0:
1377 1377 return classname_matches
1378 1378 elif len(classname_matches) == 1 and \
1379 1379 classname_matches[0] == classname:
1380 1380 cls = classes[classnames.index(classname)].__class__
1381 1381 help = cls.class_get_help()
1382 1382 # strip leading '--' from cl-args:
1383 1383 help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
1384 1384 return [ attr.split('=')[0]
1385 1385 for attr in help.strip().splitlines()
1386 1386 if attr.startswith(texts[1]) ]
1387 1387 return []
1388 1388
1389 1389 def magic_color_matches(self, text:str) -> List[str] :
1390 1390 """ Match color schemes for %colors magic"""
1391 1391 texts = text.split()
1392 1392 if text.endswith(' '):
1393 1393 # .split() strips off the trailing whitespace. Add '' back
1394 1394 # so that: '%colors ' -> ['%colors', '']
1395 1395 texts.append('')
1396 1396
1397 1397 if len(texts) == 2 and (texts[0] == 'colors' or texts[0] == '%colors'):
1398 1398 prefix = texts[1]
1399 1399 return [ color for color in InspectColors.keys()
1400 1400 if color.startswith(prefix) ]
1401 1401 return []
1402 1402
1403 1403 def _jedi_matches(self, cursor_column:int, cursor_line:int, text:str) -> Iterable[Any]:
1404 1404 """
1405 1405 Return a list of :any:`jedi.api.Completions` objects from a ``text`` and
1406 1406 cursor position.
1407 1407
1408 1408 Parameters
1409 1409 ----------
1410 1410 cursor_column : int
1411 1411 column position of the cursor in ``text``, 0-indexed.
1412 1412 cursor_line : int
1413 1413 line position of the cursor in ``text``, 0-indexed
1414 1414 text : str
1415 1415 text to complete
1416 1416
1417 1417 Notes
1418 1418 -----
1419 1419 If ``IPCompleter.debug`` is ``True``, this may return a :any:`_FakeJediCompletion`
1420 1420 object containing a string with the Jedi debug information attached.
1421 1421 """
1422 1422 namespaces = [self.namespace]
1423 1423 if self.global_namespace is not None:
1424 1424 namespaces.append(self.global_namespace)
1425 1425
1426 1426 completion_filter = lambda x:x
1427 1427 offset = cursor_to_position(text, cursor_line, cursor_column)
1428 1428 # filter output if we are completing for object members
1429 1429 if offset:
1430 1430 pre = text[offset-1]
1431 1431 if pre == '.':
1432 1432 if self.omit__names == 2:
1433 1433 completion_filter = lambda c:not c.name.startswith('_')
1434 1434 elif self.omit__names == 1:
1435 1435 completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
1436 1436 elif self.omit__names == 0:
1437 1437 completion_filter = lambda x:x
1438 1438 else:
1439 1439 raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
1440 1440
1441 1441 interpreter = jedi.Interpreter(text[:offset], namespaces)
1442 1442 try_jedi = True
1443 1443
1444 1444 try:
1445 1445 # find the first token in the current tree -- if it is a ' or " then we are in a string
1446 1446 completing_string = False
1447 1447 try:
1448 1448 first_child = next(c for c in interpreter._get_module().tree_node.children if hasattr(c, 'value'))
1449 1449 except StopIteration:
1450 1450 pass
1451 1451 else:
1452 1452 # note the value may be ', ", or it may also be ''' or """, or
1453 1453 # in some cases, """what/you/typed..., but all of these are
1454 1454 # strings.
1455 1455 completing_string = len(first_child.value) > 0 and first_child.value[0] in {"'", '"'}
1456 1456
1457 1457 # if we are in a string jedi is likely not the right candidate for
1458 1458 # now. Skip it.
1459 1459 try_jedi = not completing_string
1460 1460 except Exception as e:
1461 1461 # many things can go wrong; we are using a private API, just don't crash.
1462 1462 if self.debug:
1463 1463 print("Error detecting if completing a non-finished string :", e, '|')
1464 1464
1465 1465 if not try_jedi:
1466 1466 return []
1467 1467 try:
1468 1468 return filter(completion_filter, interpreter.complete(column=cursor_column, line=cursor_line + 1))
1469 1469 except Exception as e:
1470 1470 if self.debug:
1471 1471 return [_FakeJediCompletion('Oops Jedi has crashed, please report a bug with the following:\n"""\n%s\n"""' % (e))]
1472 1472 else:
1473 1473 return []
1474 1474
1475 1475 def python_matches(self, text:str)->List[str]:
1476 1476 """Match attributes or global python names"""
1477 1477 if "." in text:
1478 1478 try:
1479 1479 matches = self.attr_matches(text)
1480 1480 if text.endswith('.') and self.omit__names:
1481 1481 if self.omit__names == 1:
1482 1482 # true if txt is _not_ a __ name, false otherwise:
1483 1483 no__name = (lambda txt:
1484 1484 re.match(r'.*\.__.*?__',txt) is None)
1485 1485 else:
1486 1486 # true if txt is _not_ a _ name, false otherwise:
1487 1487 no__name = (lambda txt:
1488 1488 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
1489 1489 matches = filter(no__name, matches)
1490 1490 except NameError:
1491 1491 # catches <undefined attributes>.<tab>
1492 1492 matches = []
1493 1493 else:
1494 1494 matches = self.global_matches(text)
1495 1495 return matches
1496 1496
1497 1497 def _default_arguments_from_docstring(self, doc):
1498 1498 """Parse the first line of docstring for call signature.
1499 1499
1500 1500 Docstring should be of the form 'min(iterable[, key=func])\n'.
1501 1501 It can also parse cython docstring of the form
1502 1502 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
1503 1503 """
1504 1504 if doc is None:
1505 1505 return []
1506 1506
1507 1507 # only care about the first line
1508 1508 line = doc.lstrip().splitlines()[0]
1509 1509
1510 1510 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1511 1511 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
1512 1512 sig = self.docstring_sig_re.search(line)
1513 1513 if sig is None:
1514 1514 return []
1515 1515 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
1516 1516 sig = sig.groups()[0].split(',')
1517 1517 ret = []
1518 1518 for s in sig:
1519 1519 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1520 1520 ret += self.docstring_kwd_re.findall(s)
1521 1521 return ret
1522 1522
1523 1523 def _default_arguments(self, obj):
1524 1524 """Return the list of default arguments of obj if it is callable,
1525 1525 or empty list otherwise."""
1526 1526 call_obj = obj
1527 1527 ret = []
1528 1528 if inspect.isbuiltin(obj):
1529 1529 pass
1530 1530 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
1531 1531 if inspect.isclass(obj):
1532 1532 #for cython embedsignature=True the constructor docstring
1533 1533 #belongs to the object itself not __init__
1534 1534 ret += self._default_arguments_from_docstring(
1535 1535 getattr(obj, '__doc__', ''))
1536 1536 # for classes, check for __init__,__new__
1537 1537 call_obj = (getattr(obj, '__init__', None) or
1538 1538 getattr(obj, '__new__', None))
1539 1539 # for all others, check if they are __call__able
1540 1540 elif hasattr(obj, '__call__'):
1541 1541 call_obj = obj.__call__
1542 1542 ret += self._default_arguments_from_docstring(
1543 1543 getattr(call_obj, '__doc__', ''))
1544 1544
1545 1545 _keeps = (inspect.Parameter.KEYWORD_ONLY,
1546 1546 inspect.Parameter.POSITIONAL_OR_KEYWORD)
1547 1547
1548 1548 try:
1549 1549 sig = inspect.signature(obj)
1550 1550 ret.extend(k for k, v in sig.parameters.items() if
1551 1551 v.kind in _keeps)
1552 1552 except ValueError:
1553 1553 pass
1554 1554
1555 1555 return list(set(ret))
1556 1556
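A minimal standalone sketch of the signature-based branch above, using only the standard library ``inspect`` module; the ``example`` function is hypothetical and not part of this diff.

.. code::

    import inspect

    def example(a, b=1, *args, c=2, **kwargs):
        pass

    # Parameter kinds that can be passed by keyword, mirroring ``_keeps`` above.
    keep = (inspect.Parameter.KEYWORD_ONLY,
            inspect.Parameter.POSITIONAL_OR_KEYWORD)
    sig = inspect.signature(example)
    print([name for name, p in sig.parameters.items() if p.kind in keep])
    # -> ['a', 'b', 'c']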
1557 1557 def python_func_kw_matches(self, text):
1558 1558 """Match named parameters (kwargs) of the last open function"""
1559 1559
1560 1560 if "." in text: # a parameter cannot be dotted
1561 1561 return []
1562 1562 try: regexp = self.__funcParamsRegex
1563 1563 except AttributeError:
1564 1564 regexp = self.__funcParamsRegex = re.compile(r'''
1565 1565 '.*?(?<!\\)' | # single quoted strings or
1566 1566 ".*?(?<!\\)" | # double quoted strings or
1567 1567 \w+ | # identifier
1568 1568 \S # other characters
1569 1569 ''', re.VERBOSE | re.DOTALL)
1570 1570 # 1. find the nearest identifier that comes before an unclosed
1571 1571 # parenthesis before the cursor
1572 1572 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
1573 1573 tokens = regexp.findall(self.text_until_cursor)
1574 1574 iterTokens = reversed(tokens); openPar = 0
1575 1575
1576 1576 for token in iterTokens:
1577 1577 if token == ')':
1578 1578 openPar -= 1
1579 1579 elif token == '(':
1580 1580 openPar += 1
1581 1581 if openPar > 0:
1582 1582 # found the last unclosed parenthesis
1583 1583 break
1584 1584 else:
1585 1585 return []
1586 1586 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
1587 1587 ids = []
1588 1588 isId = re.compile(r'\w+$').match
1589 1589
1590 1590 while True:
1591 1591 try:
1592 1592 ids.append(next(iterTokens))
1593 1593 if not isId(ids[-1]):
1594 1594 ids.pop(); break
1595 1595 if not next(iterTokens) == '.':
1596 1596 break
1597 1597 except StopIteration:
1598 1598 break
1599 1599
1600 1600 # Find all named arguments already assigned to, so as to avoid suggesting
1601 1601 # them again
1602 1602 usedNamedArgs = set()
1603 1603 par_level = -1
1604 1604 for token, next_token in zip(tokens, tokens[1:]):
1605 1605 if token == '(':
1606 1606 par_level += 1
1607 1607 elif token == ')':
1608 1608 par_level -= 1
1609 1609
1610 1610 if par_level != 0:
1611 1611 continue
1612 1612
1613 1613 if next_token != '=':
1614 1614 continue
1615 1615
1616 1616 usedNamedArgs.add(token)
1617 1617
1618 1618 argMatches = []
1619 1619 try:
1620 1620 callableObj = '.'.join(ids[::-1])
1621 1621 namedArgs = self._default_arguments(eval(callableObj,
1622 1622 self.namespace))
1623 1623
1624 1624 # Remove used named arguments from the list, no need to show twice
1625 1625 for namedArg in set(namedArgs) - usedNamedArgs:
1626 1626 if namedArg.startswith(text):
1627 1627 argMatches.append("%s=" %namedArg)
1628 1628 except:
1629 1629 pass
1630 1630
1631 1631 return argMatches
1632 1632
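A hypothetical re-creation of the tokenisation used in step 1 above (the same verbose regex), shown only to illustrate what the line is split into before the parenthesis matching:

.. code::

    import re

    tok = re.compile(r'''
        '.*?(?<!\\)' |   # single quoted strings
        ".*?(?<!\\)" |   # double quoted strings
        \w+          |   # identifiers
        \S               # any other non-whitespace character
        ''', re.VERBOSE | re.DOTALL)

    print(tok.findall('foo (1+bar(x), pa'))
    # -> ['foo', '(', '1', '+', 'bar', '(', 'x', ')', ',', 'pa']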
1633 1633 @staticmethod
1634 1634 def _get_keys(obj: Any) -> List[Any]:
1635 1635 # Objects can define their own completions by defining an
1636 1636 # _ipython_key_completions_() method.
1637 1637 method = get_real_method(obj, '_ipython_key_completions_')
1638 1638 if method is not None:
1639 1639 return method()
1640 1640
1641 1641 # Special case some common in-memory dict-like types
1642 1642 if isinstance(obj, dict) or\
1643 1643 _safe_isinstance(obj, 'pandas', 'DataFrame'):
1644 1644 try:
1645 1645 return list(obj.keys())
1646 1646 except Exception:
1647 1647 return []
1648 1648 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
1649 1649 _safe_isinstance(obj, 'numpy', 'void'):
1650 1650 return obj.dtype.names or []
1651 1651 return []
1652 1652
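A minimal sketch of an object opting into key completion through the ``_ipython_key_completions_`` protocol checked above; the ``Catalog`` class is hypothetical.

.. code::

    class Catalog:
        def __init__(self, data):
            self._data = dict(data)

        def __getitem__(self, key):
            return self._data[key]

        def _ipython_key_completions_(self):
            # Returning the keys here is what makes ``cat["<tab>`` work.
            return list(self._data)

    cat = Catalog({"alpha": 1, "beta": 2})
    print(cat._ipython_key_completions_())   # -> ['alpha', 'beta']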
1653 1653 def dict_key_matches(self, text:str) -> List[str]:
1654 1654 "Match string keys in a dictionary, after e.g. 'foo[' "
1655 1655
1656 1656
1657 1657 if self.__dict_key_regexps is not None:
1658 1658 regexps = self.__dict_key_regexps
1659 1659 else:
1660 1660 dict_key_re_fmt = r'''(?x)
1661 1661 ( # match dict-referring expression wrt greedy setting
1662 1662 %s
1663 1663 )
1664 1664 \[ # open bracket
1665 1665 \s* # and optional whitespace
1666 1666 # Capture any number of str-like objects (e.g. "a", "b", 'c')
1667 1667 ((?:[uUbB]? # string prefix (r not handled)
1668 1668 (?:
1669 1669 '(?:[^']|(?<!\\)\\')*'
1670 1670 |
1671 1671 "(?:[^"]|(?<!\\)\\")*"
1672 1672 )
1673 1673 \s*,\s*
1674 1674 )*)
1675 1675 ([uUbB]? # string prefix (r not handled)
1676 1676 (?: # unclosed string
1677 1677 '(?:[^']|(?<!\\)\\')*
1678 1678 |
1679 1679 "(?:[^"]|(?<!\\)\\")*
1680 1680 )
1681 1681 )?
1682 1682 $
1683 1683 '''
1684 1684 regexps = self.__dict_key_regexps = {
1685 1685 False: re.compile(dict_key_re_fmt % r'''
1686 1686 # identifiers separated by .
1687 1687 (?!\d)\w+
1688 1688 (?:\.(?!\d)\w+)*
1689 1689 '''),
1690 1690 True: re.compile(dict_key_re_fmt % '''
1691 1691 .+
1692 1692 ''')
1693 1693 }
1694 1694
1695 1695 match = regexps[self.greedy].search(self.text_until_cursor)
1696 1696
1697 1697 if match is None:
1698 1698 return []
1699 1699
1700 1700 expr, prefix0, prefix = match.groups()
1701 1701 try:
1702 1702 obj = eval(expr, self.namespace)
1703 1703 except Exception:
1704 1704 try:
1705 1705 obj = eval(expr, self.global_namespace)
1706 1706 except Exception:
1707 1707 return []
1708 1708
1709 1709 keys = self._get_keys(obj)
1710 1710 if not keys:
1711 1711 return keys
1712 1712
1713 1713 extra_prefix = eval(prefix0) if prefix0 != '' else None
1714 1714
1715 1715 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims, extra_prefix=extra_prefix)
1716 1716 if not matches:
1717 1717 return matches
1718 1718
1719 1719 # get the cursor position of
1720 1720 # - the text being completed
1721 1721 # - the start of the key text
1722 1722 # - the start of the completion
1723 1723 text_start = len(self.text_until_cursor) - len(text)
1724 1724 if prefix:
1725 1725 key_start = match.start(3)
1726 1726 completion_start = key_start + token_offset
1727 1727 else:
1728 1728 key_start = completion_start = match.end()
1729 1729
1730 1730 # grab the leading prefix, to make sure all completions start with `text`
1731 1731 if text_start > key_start:
1732 1732 leading = ''
1733 1733 else:
1734 1734 leading = text[text_start:completion_start]
1735 1735
1736 1736 # the index of the `[` character
1737 1737 bracket_idx = match.end(1)
1738 1738
1739 1739 # append closing quote and bracket as appropriate
1740 1740 # this is *not* appropriate if the opening quote or bracket is outside
1741 1741 # the text given to this method
1742 1742 suf = ''
1743 1743 continuation = self.line_buffer[len(self.text_until_cursor):]
1744 1744 if key_start > text_start and closing_quote:
1745 1745 # quotes were opened inside text, maybe close them
1746 1746 if continuation.startswith(closing_quote):
1747 1747 continuation = continuation[len(closing_quote):]
1748 1748 else:
1749 1749 suf += closing_quote
1750 1750 if bracket_idx > text_start:
1751 1751 # brackets were opened inside text, maybe close them
1752 1752 if not continuation.startswith(']'):
1753 1753 suf += ']'
1754 1754
1755 1755 return [leading + k + suf for k in matches]
1756 1756
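A hedged usage sketch of the dict-key matching above; it assumes an interactive IPython session (``get_ipython()`` returns ``None`` otherwise) and a hypothetical dictionary ``d``.

.. code::

    from IPython import get_ipython

    ip = get_ipython()                # assumes an interactive IPython session
    ip.user_ns["d"] = {"abc": None, "abd": None}
    _, matches = ip.Completer.complete(line_buffer="d['a")
    print(matches)                    # expected to include 'abc' and 'abd'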
1757 1757 @staticmethod
1758 1758 def unicode_name_matches(text:str) -> Tuple[str, List[str]] :
1759 1759 """Match Latex-like syntax for unicode characters based
1760 1760 on the name of the character.
1761 1761
1762 1762 This does ``\\GREEK SMALL LETTER ETA`` -> ``η``
1763 1763
1764 1764 Works only on valid Python 3 identifiers, or on combining characters that
1765 1765 will combine to form a valid identifier.
1766 1766 """
1767 1767 slashpos = text.rfind('\\')
1768 1768 if slashpos > -1:
1769 1769 s = text[slashpos+1:]
1770 1770 try :
1771 1771 unic = unicodedata.lookup(s)
1772 1772 # allow combining chars
1773 1773 if ('a'+unic).isidentifier():
1774 1774 return '\\'+s,[unic]
1775 1775 except KeyError:
1776 1776 pass
1777 1777 return '', []
1778 1778
1779 1779
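The heart of the lookup above is :any:`unicodedata.lookup`; a small standalone illustration:

.. code::

    import unicodedata

    name = "GREEK SMALL LETTER ETA"
    char = unicodedata.lookup(name)        # name -> character
    print(char)                            # -> η
    print(("a" + char).isidentifier())     # -> True, so it can be offered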
1780 1780 def latex_matches(self, text:str) -> Tuple[str, Sequence[str]]:
1781 1781 """Match Latex syntax for unicode characters.
1782 1782
1783 1783 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``α``
1784 1784 """
1785 1785 slashpos = text.rfind('\\')
1786 1786 if slashpos > -1:
1787 1787 s = text[slashpos:]
1788 1788 if s in latex_symbols:
1789 1789 # Try to complete a full latex symbol to unicode
1790 1790 # \\alpha -> α
1791 1791 return s, [latex_symbols[s]]
1792 1792 else:
1793 1793 # If a user has partially typed a latex symbol, give them
1794 1794 # a full list of options \al -> [\aleph, \alpha]
1795 1795 matches = [k for k in latex_symbols if k.startswith(s)]
1796 1796 if matches:
1797 1797 return s, matches
1798 1798 return '', ()
1799 1799
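A small sketch of the two behaviours described above, using IPython's ``latex_symbols`` table (a plain dict); the exact set of partial matches may vary between versions.

.. code::

    from IPython.core.latex_symbols import latex_symbols

    # full symbol -> unicode character
    print(latex_symbols["\\alpha"])                             # -> 'α'
    # partial symbol -> candidate latex names
    print([k for k in latex_symbols if k.startswith("\\al")])   # e.g. ['\\aleph', '\\alpha', ...]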
1800 1800 def dispatch_custom_completer(self, text):
1801 1801 if not self.custom_completers:
1802 1802 return
1803 1803
1804 1804 line = self.line_buffer
1805 1805 if not line.strip():
1806 1806 return None
1807 1807
1808 1808 # Create a little structure to pass all the relevant information about
1809 1809 # the current completion to any custom completer.
1810 1810 event = SimpleNamespace()
1811 1811 event.line = line
1812 1812 event.symbol = text
1813 1813 cmd = line.split(None,1)[0]
1814 1814 event.command = cmd
1815 1815 event.text_until_cursor = self.text_until_cursor
1816 1816
1817 1817 # for foo etc, try also to find completer for %foo
1818 1818 if not cmd.startswith(self.magic_escape):
1819 1819 try_magic = self.custom_completers.s_matches(
1820 1820 self.magic_escape + cmd)
1821 1821 else:
1822 1822 try_magic = []
1823 1823
1824 1824 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1825 1825 try_magic,
1826 1826 self.custom_completers.flat_matches(self.text_until_cursor)):
1827 1827 try:
1828 1828 res = c(event)
1829 1829 if res:
1830 1830 # first, try case sensitive match
1831 1831 withcase = [r for r in res if r.startswith(text)]
1832 1832 if withcase:
1833 1833 return withcase
1834 1834 # if none, then case insensitive ones are ok too
1835 1835 text_low = text.lower()
1836 1836 return [r for r in res if r.lower().startswith(text_low)]
1837 1837 except TryNext:
1838 1838 pass
1839 1839 except KeyboardInterrupt:
1840 1840 """
1841 1841 If a custom completer takes too long,
1842 1842 let the keyboard interrupt abort and return nothing.
1843 1843 """
1844 1844 break
1845 1845
1846 1846 return None
1847 1847
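A hedged sketch of plugging a completer into this dispatch; ``complete_deploy`` and the ``deplo.*`` pattern are hypothetical, while ``add_re`` is the same registration helper exercised by the tests further down.

.. code::

    from IPython import get_ipython

    ip = get_ipython()                # assumes an interactive IPython session

    def complete_deploy(event):
        # ``event`` carries .line, .symbol, .command and .text_until_cursor
        return ["deploy_prod", "deploy_staging"]

    ip.Completer.custom_completers.add_re("deplo.*", complete_deploy)
    _, matches = ip.complete("deplo")
    print(matches)                    # expected: ['deploy_prod', 'deploy_staging']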
1848 1848 def completions(self, text: str, offset: int)->Iterator[Completion]:
1849 1849 """
1850 1850 Returns an iterator over the possible completions
1851 1851
1852 1852 .. warning::
1853 1853
1854 1854 Unstable
1855 1855
1856 1856 This function is unstable; the API may change without warning.
1857 1857 It will also raise unless used in the proper context manager.
1858 1858
1859 1859 Parameters
1860 1860 ----------
1861 1861 text : str
1862 1862 Full text of the current input, multi line string.
1863 1863 offset : int
1864 1864 Integer representing the position of the cursor in ``text``. Offset
1865 1865 is 0-based.
1866 1866
1867 1867 Yields
1868 1868 ------
1869 1869 Completion
1870 1870
1871 1871 Notes
1872 1872 -----
1873 1873 The cursor in a text can be seen either as being "in between"
1874 1874 characters or "on" a character, depending on the interface visible to
1875 1875 the user. For consistency, the cursor being "in between" characters X
1876 1876 and Y is equivalent to the cursor being "on" character Y, that is to say
1877 1877 the character the cursor is on is considered as being after the cursor.
1878 1878
1879 1879 Combining characters may span more than one position in the
1880 1880 text.
1881 1881
1882 1882 .. note::
1883 1883
1884 1884 If ``IPCompleter.debug`` is :any:`True`, this will yield a ``--jedi/ipython--``
1885 1885 fake Completion token to distinguish completions returned by Jedi
1886 1886 from the usual IPython completions.
1887 1887
1888 1888 .. note::
1889 1889
1890 1890 Completions are not completely deduplicated yet. If identical
1891 1891 completions are coming from different sources this function does not
1892 1892 ensure that each completion object will only be present once.
1893 1893 """
1894 1894 warnings.warn("_complete is a provisional API (as of IPython 6.0). "
1895 1895 "It may change without warnings. "
1896 1896 "Use in corresponding context manager.",
1897 1897 category=ProvisionalCompleterWarning, stacklevel=2)
1898 1898
1899 1899 seen = set()
1900 1900 profiler:Optional[cProfile.Profile]
1901 1901 try:
1902 1902 if self.profile_completions:
1903 1903 import cProfile
1904 1904 profiler = cProfile.Profile()
1905 1905 profiler.enable()
1906 1906 else:
1907 1907 profiler = None
1908 1908
1909 1909 for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
1910 1910 if c and (c in seen):
1911 1911 continue
1912 1912 yield c
1913 1913 seen.add(c)
1914 1914 except KeyboardInterrupt:
1915 1915 """If completions take too long and the user sends a keyboard interrupt,
1916 1916 do not crash and return ASAP."""
1917 1917 pass
1918 1918 finally:
1919 1919 if profiler is not None:
1920 1920 profiler.disable()
1921 1921 ensure_dir_exists(self.profiler_output_dir)
1922 1922 output_path = os.path.join(self.profiler_output_dir, str(uuid.uuid4()))
1923 1923 print("Writing profiler output to", output_path)
1924 1924 profiler.dump_stats(output_path)
1925 1925
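A hedged usage sketch of the provisional API above, assuming an interactive IPython session and the :any:`provisionalcompleter` context manager:

.. code::

    from IPython import get_ipython
    from IPython.core.completer import provisionalcompleter

    ip = get_ipython()                # assumes an interactive IPython session
    code = "list.app"
    with provisionalcompleter():
        for comp in ip.Completer.completions(code, len(code)):
            print(comp.start, comp.end, comp.text, comp.type)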
1926 1926 def _completions(self, full_text: str, offset: int, *, _timeout) -> Iterator[Completion]:
1927 1927 """
1928 1928 Core completion method. Same signature as :any:`completions`, with the
1929 1929 extra ``_timeout`` parameter (in seconds).
1930 1930
1931 1931 Computing jedi's completion ``.type`` can be quite expensive (it is a
1932 1932 lazy property) and can require some warm-up, more warm-up than just
1933 1933 computing the ``name`` of a completion. The warm-up can be:
1934 1934
1935 1935 - Long warm-up the first time a module is encountered after
1936 1936 install/update: actually build the parse/inference tree.
1937 1937
1938 1938 - First time the module is encountered in a session: load the tree from
1939 1939 disk.
1940 1940
1941 1941 We don't want to block completions for tens of seconds so we give the
1942 1942 completer a "budget" of ``_timeout`` seconds per invocation to compute
1943 1943 completion types; the completions that have not yet been computed will
1944 1944 be marked as "unknown" and will have a chance to be computed next round
1945 1945 as things get cached.
1946 1946
1947 1947 Keep in mind that Jedi is not the only thing processing the completion, so
1948 1948 keep the timeout short-ish: if we take more than 0.3 seconds we still
1949 1949 have lots of processing to do.
1950 1950
1951 1951 """
1952 1952 deadline = time.monotonic() + _timeout
1953 1953
1954 1954
1955 1955 before = full_text[:offset]
1956 1956 cursor_line, cursor_column = position_to_cursor(full_text, offset)
1957 1957
1958 1958 matched_text, matches, matches_origin, jedi_matches = self._complete(
1959 1959 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column)
1960 1960
1961 1961 iter_jm = iter(jedi_matches)
1962 1962 if _timeout:
1963 1963 for jm in iter_jm:
1964 1964 try:
1965 1965 type_ = jm.type
1966 1966 except Exception:
1967 1967 if self.debug:
1968 1968 print("Error in Jedi getting type of ", jm)
1969 1969 type_ = None
1970 1970 delta = len(jm.name_with_symbols) - len(jm.complete)
1971 1971 if type_ == 'function':
1972 1972 signature = _make_signature(jm)
1973 1973 else:
1974 1974 signature = ''
1975 1975 yield Completion(start=offset - delta,
1976 1976 end=offset,
1977 1977 text=jm.name_with_symbols,
1978 1978 type=type_,
1979 1979 signature=signature,
1980 1980 _origin='jedi')
1981 1981
1982 1982 if time.monotonic() > deadline:
1983 1983 break
1984 1984
1985 1985 for jm in iter_jm:
1986 1986 delta = len(jm.name_with_symbols) - len(jm.complete)
1987 1987 yield Completion(start=offset - delta,
1988 1988 end=offset,
1989 1989 text=jm.name_with_symbols,
1990 1990 type='<unknown>', # don't compute type for speed
1991 1991 _origin='jedi',
1992 1992 signature='')
1993 1993
1994 1994
1995 1995 start_offset = before.rfind(matched_text)
1996 1996
1997 1997 # TODO:
1998 1998 # Suppress this, right now just for debug.
1999 1999 if jedi_matches and matches and self.debug:
2000 2000 yield Completion(start=start_offset, end=offset, text='--jedi/ipython--',
2001 2001 _origin='debug', type='none', signature='')
2002 2002
2003 2003 # I'm unsure if this is always true, so let's assert and see if it
2004 2004 # crashes
2005 2005 assert before.endswith(matched_text)
2006 2006 for m, t in zip(matches, matches_origin):
2007 2007 yield Completion(start=start_offset, end=offset, text=m, _origin=t, signature='', type='<unknown>')
2008 2008
2009 2009
2010 2010 def complete(self, text=None, line_buffer=None, cursor_pos=None) -> Tuple[str, Sequence[str]]:
2011 2011 """Find completions for the given text and line context.
2012 2012
2013 2013 Note that both the text and the line_buffer are optional, but at least
2014 2014 one of them must be given.
2015 2015
2016 2016 Parameters
2017 2017 ----------
2018 2018 text : string, optional
2019 2019 Text to perform the completion on. If not given, the line buffer
2020 2020 is split using the instance's CompletionSplitter object.
2021 2021 line_buffer : string, optional
2022 2022 If not given, the completer attempts to obtain the current line
2023 2023 buffer via readline. This keyword allows clients which are
2024 2024 requesting text completions in non-readline contexts to inform
2025 2025 the completer of the entire text.
2026 2026 cursor_pos : int, optional
2027 2027 Index of the cursor in the full line buffer. Should be provided by
2028 2028 remote frontends where the kernel has no access to frontend state.
2029 2029
2030 2030 Returns
2031 2031 -------
2032 2032 Tuple of two items:
2033 2033 text : str
2034 2034 Text that was actually used in the completion.
2035 2035 matches : list
2036 2036 A list of completion matches.
2037 2037
2038 2038 Notes
2039 2039 -----
2040 2040 This API is likely to be deprecated and replaced by
2041 2041 :any:`IPCompleter.completions` in the future.
2042 2042
2043 2043 """
2044 2044 warnings.warn('`Completer.complete` is pending deprecation since '
2045 2045 'IPython 6.0 and will be replaced by `Completer.completions`.',
2046 2046 PendingDeprecationWarning)
2047 2047 # potential todo: FOLD the 3rd throw-away argument of _complete
2048 2048 # into the first two.
2049 2049 return self._complete(line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0)[:2]
2050 2050
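For contrast, a hedged sketch of this legacy pending-deprecation API, again assuming an interactive IPython session:

.. code::

    from IPython import get_ipython

    ip = get_ipython()                # assumes an interactive IPython session
    text, matches = ip.Completer.complete(text="impo")
    print(text, matches)              # expected to include 'import'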
2051 2051 def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
2052 2052 full_text=None) -> _CompleteResult:
2053 2053 """
2054 2054 Like complete but can also return raw Jedi completions, as well as the
2055 2055 origin of the completion text. This could (and should) be made much
2056 2056 cleaner but that will be simpler once we drop the old (and stateful)
2057 2057 :any:`complete` API.
2058 2058
2059 2059 With the current provisional API, ``cursor_pos`` acts (depending on the
2060 2060 caller) either as the offset in the ``text`` or ``line_buffer``, or as the
2061 2061 ``column`` when passing multiline strings; this could/should be renamed
2062 2062 but would add extra noise.
2063 2063
2064 2064 Parameters
2065 2065 ----------
2066 2066 cursor_line
2067 2067 Index of the line the cursor is on. 0 indexed.
2068 2068 cursor_pos
2069 2069 Position of the cursor in the current line/line_buffer/text. 0
2070 2070 indexed.
2071 2071 line_buffer : optional, str
2072 2072 The current line the cursor is in; this exists mostly for the legacy
2073 2073 reason that readline could only give us the single current line.
2074 2074 Prefer `full_text`.
2075 2075 text : str
2076 2076 The current "token" the cursor is in, also mostly for historical
2077 2077 reasons, as the completer would trigger only after the current line
2078 2078 was parsed.
2079 2079 full_text : str
2080 2080 Full text of the current cell.
2081 2081
2082 2082 Returns
2083 2083 -------
2084 2084 A tuple of four elements (a ``_CompleteResult``):
2085 2085 matched_text: the text that the completion matched
2086 2086 matches: list of completions
2087 2087 matches_origin: list of the same length as matches, indicating where each completion came from
2088 2088 jedi_matches: list of Jedi matches, which has its own structure.
2089 2089 """
2090 2090
2091 2091
2092 2092 # if the cursor position isn't given, the only sane assumption we can
2093 2093 # make is that it's at the end of the line (the common case)
2094 2094 if cursor_pos is None:
2095 2095 cursor_pos = len(line_buffer) if text is None else len(text)
2096 2096
2097 2097 if self.use_main_ns:
2098 2098 self.namespace = __main__.__dict__
2099 2099
2100 2100 # if text is either None or an empty string, rely on the line buffer
2101 2101 if (not line_buffer) and full_text:
2102 2102 line_buffer = full_text.split('\n')[cursor_line]
2103 2103 if not text: # issue #11508: check line_buffer before calling split_line
2104 2104 text = self.splitter.split_line(line_buffer, cursor_pos) if line_buffer else ''
2105 2105
2106 2106 if self.backslash_combining_completions:
2107 2107 # allow deactivation of these on windows.
2108 2108 base_text = text if not line_buffer else line_buffer[:cursor_pos]
2109 2109
2110 2110 for meth in (self.latex_matches,
2111 2111 self.unicode_name_matches,
2112 2112 back_latex_name_matches,
2113 2113 back_unicode_name_matches,
2114 2114 self.fwd_unicode_match):
2115 2115 name_text, name_matches = meth(base_text)
2116 2116 if name_text:
2117 2117 return _CompleteResult(name_text, name_matches[:MATCHES_LIMIT], \
2118 2118 [meth.__qualname__]*min(len(name_matches), MATCHES_LIMIT), ())
2119 2119
2120 2120
2121 2121 # If no line buffer is given, assume the input text is all there was
2122 2122 if line_buffer is None:
2123 2123 line_buffer = text
2124 2124
2125 2125 self.line_buffer = line_buffer
2126 2126 self.text_until_cursor = self.line_buffer[:cursor_pos]
2127 2127
2128 2128 # Do magic arg matches
2129 2129 for matcher in self.magic_arg_matchers:
2130 2130 matches = list(matcher(line_buffer))[:MATCHES_LIMIT]
2131 2131 if matches:
2132 2132 origins = [matcher.__qualname__] * len(matches)
2133 2133 return _CompleteResult(text, matches, origins, ())
2134 2134
2135 2135 # Start with a clean slate of completions
2136 2136 matches = []
2137 2137
2138 2138 # FIXME: we should extend our api to return a dict with completions for
2139 2139 # different types of objects. The rlcomplete() method could then
2140 2140 # simply collapse the dict into a list for readline, but we'd have
2141 2141 # richer completion semantics in other environments.
2142 completions:Iterable[Any] = []
2143 if self.use_jedi:
2142 is_magic_prefix = len(text) > 0 and text[0] == "%"
2143 completions: Iterable[Any] = []
2144 if self.use_jedi and not is_magic_prefix:
2144 2145 if not full_text:
2145 2146 full_text = line_buffer
2146 2147 completions = self._jedi_matches(
2147 2148 cursor_pos, cursor_line, full_text)
2148 2149
2149 2150 if self.merge_completions:
2150 2151 matches = []
2151 2152 for matcher in self.matchers:
2152 2153 try:
2153 2154 matches.extend([(m, matcher.__qualname__)
2154 2155 for m in matcher(text)])
2155 2156 except:
2156 2157 # Show the ugly traceback if the matcher causes an
2157 2158 # exception, but do NOT crash the kernel!
2158 2159 sys.excepthook(*sys.exc_info())
2159 2160 else:
2160 2161 for matcher in self.matchers:
2161 2162 matches = [(m, matcher.__qualname__)
2162 2163 for m in matcher(text)]
2163 2164 if matches:
2164 2165 break
2165 2166
2166 2167 seen = set()
2167 2168 filtered_matches = set()
2168 2169 for m in matches:
2169 2170 t, c = m
2170 2171 if t not in seen:
2171 2172 filtered_matches.add(m)
2172 2173 seen.add(t)
2173 2174
2174 2175 _filtered_matches = sorted(filtered_matches, key=lambda x: completions_sorting_key(x[0]))
2175 2176
2176 2177 custom_res = [(m, 'custom') for m in self.dispatch_custom_completer(text) or []]
2177 2178
2178 2179 _filtered_matches = custom_res or _filtered_matches
2179 2180
2180 2181 _filtered_matches = _filtered_matches[:MATCHES_LIMIT]
2181 2182 _matches = [m[0] for m in _filtered_matches]
2182 2183 origins = [m[1] for m in _filtered_matches]
2183 2184
2184 2185 self.matches = _matches
2185 2186
2186 2187 return _CompleteResult(text, _matches, origins, completions)
2187 2188
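A hedged illustration of the behaviour the ``is_magic_prefix`` check above introduces: a leading ``%`` bypasses Jedi, so only the magic (and other IPython) matchers contribute.

.. code::

    from IPython import get_ipython

    ip = get_ipython()                # assumes an interactive IPython session
    _, matches = ip.complete("%tim")
    print(matches)                    # e.g. ['%time', '%timeit', '%%time', '%%timeit']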
2188 2189 def fwd_unicode_match(self, text:str) -> Tuple[str, Sequence[str]]:
2189 2190 """
2190 2191 Forward match a string starting with a backslash with a list of
2191 2192 potential Unicode completions.
2192 2193
2193 2194 Will compute the list of Unicode character names on first call and cache it.
2194 2195
2195 2196 Returns
2196 2197 -------
2197 2198 A tuple with:
2198 2199 - matched text (empty if no matches)
2199 2200 - list of potential completions (empty tuple if none)
2200 2201 """
2201 2202 # TODO: self.unicode_names is here a list we traverse each time with ~100k elements.
2202 2203 # We could do a faster match using a Trie.
2203 2204
2204 2205 # Using pygtrie the following seems to work:
2205 2206
2206 2207 # s = PrefixSet()
2207 2208
2208 2209 # for c in range(0,0x10FFFF + 1):
2209 2210 # try:
2210 2211 # s.add(unicodedata.name(chr(c)))
2211 2212 # except ValueError:
2212 2213 # pass
2213 2214 # [''.join(k) for k in s.iter(prefix)]
2214 2215
2215 2216 # But this needs to be timed and adds an extra dependency.
2216 2217
2217 2218 slashpos = text.rfind('\\')
2218 2219 # if the text contains a backslash
2219 2220 if slashpos > -1:
2220 2221 # PERF: It's important that we don't access self._unicode_names
2221 2222 # until we're inside this if-block. _unicode_names is lazily
2222 2223 # initialized, and it takes a user-noticeable amount of time to
2223 2224 # initialize it, so we don't want to initialize it unless we're
2224 2225 # actually going to use it.
2225 2226 s = text[slashpos + 1 :]
2226 2227 sup = s.upper()
2227 2228 candidates = [x for x in self.unicode_names if x.startswith(sup)]
2228 2229 if candidates:
2229 2230 return s, candidates
2230 2231 candidates = [x for x in self.unicode_names if sup in x]
2231 2232 if candidates:
2232 2233 return s, candidates
2233 2234 splitsup = sup.split(" ")
2234 2235 candidates = [
2235 2236 x for x in self.unicode_names if all(u in x for u in splitsup)
2236 2237 ]
2237 2238 if candidates:
2238 2239 return s, candidates
2239 2240
2240 2241 return "", ()
2241 2242
2242 2243 # if the text does not contain a backslash
2243 2244 else:
2244 2245 return '', ()
2245 2246
2246 2247 @property
2247 2248 def unicode_names(self) -> List[str]:
2248 2249 """List of names of unicode code points that can be completed.
2249 2250
2250 2251 The list is lazily initialized on first access.
2251 2252 """
2252 2253 if self._unicode_names is None:
2253 2254 names = []
2254 2255 for c in range(0,0x10FFFF + 1):
2255 2256 try:
2256 2257 names.append(unicodedata.name(chr(c)))
2257 2258 except ValueError:
2258 2259 pass
2259 2260 self._unicode_names = _unicode_name_compute(_UNICODE_RANGES)
2260 2261
2261 2262 return self._unicode_names
2262 2263
2263 2264 def _unicode_name_compute(ranges:List[Tuple[int,int]]) -> List[str]:
2264 2265 names = []
2265 2266 for start,stop in ranges:
2266 2267 for c in range(start, stop) :
2267 2268 try:
2268 2269 names.append(unicodedata.name(chr(c)))
2269 2270 except ValueError:
2270 2271 pass
2271 2272 return names
@@ -1,1264 +1,1275 b''
1 1 # encoding: utf-8
2 2 """Tests for the IPython tab-completion machinery."""
3 3
4 4 # Copyright (c) IPython Development Team.
5 5 # Distributed under the terms of the Modified BSD License.
6 6
7 7 import os
8 8 import pytest
9 9 import sys
10 10 import textwrap
11 11 import unittest
12 12
13 13 from contextlib import contextmanager
14 14
15 15 from traitlets.config.loader import Config
16 16 from IPython import get_ipython
17 17 from IPython.core import completer
18 18 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
19 19 from IPython.utils.generics import complete_object
20 20 from IPython.testing import decorators as dec
21 21
22 22 from IPython.core.completer import (
23 23 Completion,
24 24 provisionalcompleter,
25 25 match_dict_keys,
26 26 _deduplicate_completions,
27 27 )
28 28
29 29 # -----------------------------------------------------------------------------
30 30 # Test functions
31 31 # -----------------------------------------------------------------------------
32 32
33 33 def recompute_unicode_ranges():
34 34 """
35 35 Utility to recompute the largest unicode range without any characters.
36 36
37 37 Used to recompute the gap in the global _UNICODE_RANGES of completer.py.
38 38 """
39 39 import itertools
40 40 import unicodedata
41 41 valid = []
42 42 for c in range(0,0x10FFFF + 1):
43 43 try:
44 44 unicodedata.name(chr(c))
45 45 except ValueError:
46 46 continue
47 47 valid.append(c)
48 48
49 49 def ranges(i):
50 50 for a, b in itertools.groupby(enumerate(i), lambda pair: pair[1] - pair[0]):
51 51 b = list(b)
52 52 yield b[0][1], b[-1][1]
53 53
54 54 rg = list(ranges(valid))
55 55 lens = []
56 56 gap_lens = []
57 57 pstart, pstop = 0,0
58 58 for start, stop in rg:
59 59 lens.append(stop-start)
60 60 gap_lens.append((start - pstop, hex(pstop), hex(start), f'{round((start - pstop)/0xe01f0*100)}%'))
61 61 pstart, pstop = start, stop
62 62
63 63 return sorted(gap_lens)[-1]
64 64
65 65
66 66
67 67 def test_unicode_range():
68 68 """
69 69 Test that the ranges we test for unicode names give the same number of
70 70 results as testing the full length.
71 71 """
72 72 from IPython.core.completer import _unicode_name_compute, _UNICODE_RANGES
73 73
74 74 expected_list = _unicode_name_compute([(0, 0x110000)])
75 75 test = _unicode_name_compute(_UNICODE_RANGES)
76 76 len_exp = len(expected_list)
77 77 len_test = len(test)
78 78
79 79 # do not inline the len() or on error pytest will try to print the 130 000 +
80 80 # elements.
81 81 message = None
82 82 if len_exp != len_test or len_exp > 131808:
83 83 size, start, stop, prct = recompute_unicode_ranges()
84 84 message = f"""_UNICODE_RANGES is likely wrong and needs updating. This is
85 85 likely due to a new release of Python. We've found that the biggest gap
86 86 in unicode characters has shrunk to {size} characters
87 87 ({prct}), from {start} to {stop}. In completer.py, likely update to
88 88
89 89 _UNICODE_RANGES = [(32, {start}), ({stop}, 0xe01f0)]
90 90
91 91 And update the assertion below to use
92 92
93 93 len_exp <= {len_exp}
94 94 """
95 95 assert len_exp == len_test, message
96 96
97 97 # fail if new unicode symbols have been added.
98 98 assert len_exp <= 138552, message
99 99
100 100
101 101 @contextmanager
102 102 def greedy_completion():
103 103 ip = get_ipython()
104 104 greedy_original = ip.Completer.greedy
105 105 try:
106 106 ip.Completer.greedy = True
107 107 yield
108 108 finally:
109 109 ip.Completer.greedy = greedy_original
110 110
111 111
112 112 def test_protect_filename():
113 113 if sys.platform == "win32":
114 114 pairs = [
115 115 ("abc", "abc"),
116 116 (" abc", '" abc"'),
117 117 ("a bc", '"a bc"'),
118 118 ("a bc", '"a bc"'),
119 119 (" bc", '" bc"'),
120 120 ]
121 121 else:
122 122 pairs = [
123 123 ("abc", "abc"),
124 124 (" abc", r"\ abc"),
125 125 ("a bc", r"a\ bc"),
126 126 ("a bc", r"a\ \ bc"),
127 127 (" bc", r"\ \ bc"),
128 128 # On posix, we also protect parens and other special characters.
129 129 ("a(bc", r"a\(bc"),
130 130 ("a)bc", r"a\)bc"),
131 131 ("a( )bc", r"a\(\ \)bc"),
132 132 ("a[1]bc", r"a\[1\]bc"),
133 133 ("a{1}bc", r"a\{1\}bc"),
134 134 ("a#bc", r"a\#bc"),
135 135 ("a?bc", r"a\?bc"),
136 136 ("a=bc", r"a\=bc"),
137 137 ("a\\bc", r"a\\bc"),
138 138 ("a|bc", r"a\|bc"),
139 139 ("a;bc", r"a\;bc"),
140 140 ("a:bc", r"a\:bc"),
141 141 ("a'bc", r"a\'bc"),
142 142 ("a*bc", r"a\*bc"),
143 143 ('a"bc', r"a\"bc"),
144 144 ("a^bc", r"a\^bc"),
145 145 ("a&bc", r"a\&bc"),
146 146 ]
147 147 # run the actual tests
148 148 for s1, s2 in pairs:
149 149 s1p = completer.protect_filename(s1)
150 150 assert s1p == s2
151 151
152 152
153 153 def check_line_split(splitter, test_specs):
154 154 for part1, part2, split in test_specs:
155 155 cursor_pos = len(part1)
156 156 line = part1 + part2
157 157 out = splitter.split_line(line, cursor_pos)
158 158 assert out == split
159 159
160 160
161 161 def test_line_split():
162 162 """Basic line splitter test with default specs."""
163 163 sp = completer.CompletionSplitter()
164 164 # The format of the test specs is: part1, part2, expected answer. Parts 1
165 165 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
166 166 # was at the end of part1. So an empty part2 represents someone hitting
167 167 # tab at the end of the line, the most common case.
168 168 t = [
169 169 ("run some/scrip", "", "some/scrip"),
170 170 ("run scripts/er", "ror.py foo", "scripts/er"),
171 171 ("echo $HOM", "", "HOM"),
172 172 ("print sys.pa", "", "sys.pa"),
173 173 ("print(sys.pa", "", "sys.pa"),
174 174 ("execfile('scripts/er", "", "scripts/er"),
175 175 ("a[x.", "", "x."),
176 176 ("a[x.", "y", "x."),
177 177 ('cd "some_file/', "", "some_file/"),
178 178 ]
179 179 check_line_split(sp, t)
180 180 # Ensure splitting works OK with unicode by re-running the tests with
181 181 # all inputs turned into unicode
182 182 check_line_split(sp, [map(str, p) for p in t])
183 183
184 184
185 185 class NamedInstanceClass:
186 186 instances = {}
187 187
188 188 def __init__(self, name):
189 189 self.instances[name] = self
190 190
191 191 @classmethod
192 192 def _ipython_key_completions_(cls):
193 193 return cls.instances.keys()
194 194
195 195
196 196 class KeyCompletable:
197 197 def __init__(self, things=()):
198 198 self.things = things
199 199
200 200 def _ipython_key_completions_(self):
201 201 return list(self.things)
202 202
203 203
204 204 class TestCompleter(unittest.TestCase):
205 205 def setUp(self):
206 206 """
207 207 We want to silence all PendingDeprecationWarnings when testing the completer.
208 208 """
209 209 self._assertwarns = self.assertWarns(PendingDeprecationWarning)
210 210 self._assertwarns.__enter__()
211 211
212 212 def tearDown(self):
213 213 try:
214 214 self._assertwarns.__exit__(None, None, None)
215 215 except AssertionError:
216 216 pass
217 217
218 218 def test_custom_completion_error(self):
219 219 """Test that errors from custom attribute completers are silenced."""
220 220 ip = get_ipython()
221 221
222 222 class A:
223 223 pass
224 224
225 225 ip.user_ns["x"] = A()
226 226
227 227 @complete_object.register(A)
228 228 def complete_A(a, existing_completions):
229 229 raise TypeError("this should be silenced")
230 230
231 231 ip.complete("x.")
232 232
233 233 def test_custom_completion_ordering(self):
234 234 """Test the ordering of completion results, including custom completers."""
235 235 ip = get_ipython()
236 236
237 237 _, matches = ip.complete('in')
238 238 assert matches.index('input') < matches.index('int')
239 239
240 240 def complete_example(a):
241 241 return ['example2', 'example1']
242 242
243 243 ip.Completer.custom_completers.add_re('ex*', complete_example)
244 244 _, matches = ip.complete('ex')
245 245 assert matches.index('example2') < matches.index('example1')
246 246
247 247 def test_unicode_completions(self):
248 248 ip = get_ipython()
249 249 # Some strings that trigger different types of completion. Check them both
250 250 # in str and unicode forms
251 251 s = ["ru", "%ru", "cd /", "floa", "float(x)/"]
252 252 for t in s + list(map(str, s)):
253 253 # We don't need to check exact completion values (they may change
254 254 # depending on the state of the namespace), but at least no exceptions
255 255 # should be thrown and the return value should be a pair of text, list
256 256 # values.
257 257 text, matches = ip.complete(t)
258 258 self.assertIsInstance(text, str)
259 259 self.assertIsInstance(matches, list)
260 260
261 261 def test_latex_completions(self):
262 262 from IPython.core.latex_symbols import latex_symbols
263 263 import random
264 264
265 265 ip = get_ipython()
266 266 # Test some random unicode symbols
267 267 keys = random.sample(sorted(latex_symbols), 10)
268 268 for k in keys:
269 269 text, matches = ip.complete(k)
270 270 self.assertEqual(text, k)
271 271 self.assertEqual(matches, [latex_symbols[k]])
272 272 # Test a more complex line
273 273 text, matches = ip.complete("print(\\alpha")
274 274 self.assertEqual(text, "\\alpha")
275 275 self.assertEqual(matches[0], latex_symbols["\\alpha"])
276 276 # Test multiple matching latex symbols
277 277 text, matches = ip.complete("\\al")
278 278 self.assertIn("\\alpha", matches)
279 279 self.assertIn("\\aleph", matches)
280 280
281 281 def test_latex_no_results(self):
282 282 """
283 283 forward latex should really return nothing in either field if nothing is found.
284 284 """
285 285 ip = get_ipython()
286 286 text, matches = ip.Completer.latex_matches("\\really_i_should_match_nothing")
287 287 self.assertEqual(text, "")
288 288 self.assertEqual(matches, ())
289 289
290 290 def test_back_latex_completion(self):
291 291 ip = get_ipython()
292 292
293 293 # do not return more than 1 match for \beta, only the latex one.
294 294 name, matches = ip.complete("\\β")
295 295 self.assertEqual(matches, ["\\beta"])
296 296
297 297 def test_back_unicode_completion(self):
298 298 ip = get_ipython()
299 299
300 300 name, matches = ip.complete("\\Ⅴ")
301 301 self.assertEqual(matches, ("\\ROMAN NUMERAL FIVE",))
302 302
303 303 def test_forward_unicode_completion(self):
304 304 ip = get_ipython()
305 305
306 306 name, matches = ip.complete("\\ROMAN NUMERAL FIVE")
307 307 self.assertEqual(matches, ["Ⅴ"]) # This is not a V
308 308 self.assertEqual(matches, ["\u2164"]) # same as above but explicit.
309 309
310 310 def test_delim_setting(self):
311 311 sp = completer.CompletionSplitter()
312 312 sp.delims = " "
313 313 self.assertEqual(sp.delims, " ")
314 314 self.assertEqual(sp._delim_expr, r"[\ ]")
315 315
316 316 def test_spaces(self):
317 317 """Test with only spaces as split chars."""
318 318 sp = completer.CompletionSplitter()
319 319 sp.delims = " "
320 320 t = [("foo", "", "foo"), ("run foo", "", "foo"), ("run foo", "bar", "foo")]
321 321 check_line_split(sp, t)
322 322
323 323 def test_has_open_quotes1(self):
324 324 for s in ["'", "'''", "'hi' '"]:
325 325 self.assertEqual(completer.has_open_quotes(s), "'")
326 326
327 327 def test_has_open_quotes2(self):
328 328 for s in ['"', '"""', '"hi" "']:
329 329 self.assertEqual(completer.has_open_quotes(s), '"')
330 330
331 331 def test_has_open_quotes3(self):
332 332 for s in ["''", "''' '''", "'hi' 'ipython'"]:
333 333 self.assertFalse(completer.has_open_quotes(s))
334 334
335 335 def test_has_open_quotes4(self):
336 336 for s in ['""', '""" """', '"hi" "ipython"']:
337 337 self.assertFalse(completer.has_open_quotes(s))
338 338
339 339 @pytest.mark.xfail(
340 340 sys.platform == "win32", reason="abspath completions fail on Windows"
341 341 )
342 342 def test_abspath_file_completions(self):
343 343 ip = get_ipython()
344 344 with TemporaryDirectory() as tmpdir:
345 345 prefix = os.path.join(tmpdir, "foo")
346 346 suffixes = ["1", "2"]
347 347 names = [prefix + s for s in suffixes]
348 348 for n in names:
349 349 open(n, "w", encoding="utf-8").close()
350 350
351 351 # Check simple completion
352 352 c = ip.complete(prefix)[1]
353 353 self.assertEqual(c, names)
354 354
355 355 # Now check with a function call
356 356 cmd = 'a = f("%s' % prefix
357 357 c = ip.complete(prefix, cmd)[1]
358 358 comp = [prefix + s for s in suffixes]
359 359 self.assertEqual(c, comp)
360 360
361 361 def test_local_file_completions(self):
362 362 ip = get_ipython()
363 363 with TemporaryWorkingDirectory():
364 364 prefix = "./foo"
365 365 suffixes = ["1", "2"]
366 366 names = [prefix + s for s in suffixes]
367 367 for n in names:
368 368 open(n, "w", encoding="utf-8").close()
369 369
370 370 # Check simple completion
371 371 c = ip.complete(prefix)[1]
372 372 self.assertEqual(c, names)
373 373
374 374 # Now check with a function call
375 375 cmd = 'a = f("%s' % prefix
376 376 c = ip.complete(prefix, cmd)[1]
377 377 comp = {prefix + s for s in suffixes}
378 378 self.assertTrue(comp.issubset(set(c)))
379 379
380 380 def test_quoted_file_completions(self):
381 381 ip = get_ipython()
382 382 with TemporaryWorkingDirectory():
383 383 name = "foo'bar"
384 384 open(name, "w", encoding="utf-8").close()
385 385
386 386 # Don't escape Windows
387 387 escaped = name if sys.platform == "win32" else "foo\\'bar"
388 388
389 389 # Single quote matches embedded single quote
390 390 text = "open('foo"
391 391 c = ip.Completer._complete(
392 392 cursor_line=0, cursor_pos=len(text), full_text=text
393 393 )[1]
394 394 self.assertEqual(c, [escaped])
395 395
396 396 # Double quote requires no escape
397 397 text = 'open("foo'
398 398 c = ip.Completer._complete(
399 399 cursor_line=0, cursor_pos=len(text), full_text=text
400 400 )[1]
401 401 self.assertEqual(c, [name])
402 402
403 403 # No quote requires an escape
404 404 text = "%ls foo"
405 405 c = ip.Completer._complete(
406 406 cursor_line=0, cursor_pos=len(text), full_text=text
407 407 )[1]
408 408 self.assertEqual(c, [escaped])
409 409
410 410 def test_all_completions_dups(self):
411 411 """
412 412 Make sure the output of `IPCompleter.all_completions` does not have
413 413 duplicated prefixes.
414 414 """
415 415 ip = get_ipython()
416 416 c = ip.Completer
417 417 ip.ex("class TestClass():\n\ta=1\n\ta1=2")
418 418 for jedi_status in [True, False]:
419 419 with provisionalcompleter():
420 420 ip.Completer.use_jedi = jedi_status
421 421 matches = c.all_completions("TestCl")
422 422 assert matches == ["TestClass"], (jedi_status, matches)
423 423 matches = c.all_completions("TestClass.")
424 424 assert len(matches) > 2, (jedi_status, matches)
425 425 matches = c.all_completions("TestClass.a")
426 426 assert matches == ['TestClass.a', 'TestClass.a1'], jedi_status
427 427
428 428 def test_jedi(self):
429 429 """
430 430 A couple of issues we had with Jedi.
431 431 """
432 432 ip = get_ipython()
433 433
434 434 def _test_complete(reason, s, comp, start=None, end=None):
435 435 l = len(s)
436 436 start = start if start is not None else l
437 437 end = end if end is not None else l
438 438 with provisionalcompleter():
439 439 ip.Completer.use_jedi = True
440 440 completions = set(ip.Completer.completions(s, l))
441 441 ip.Completer.use_jedi = False
442 442 assert Completion(start, end, comp) in completions, reason
443 443
444 444 def _test_not_complete(reason, s, comp):
445 445 l = len(s)
446 446 with provisionalcompleter():
447 447 ip.Completer.use_jedi = True
448 448 completions = set(ip.Completer.completions(s, l))
449 449 ip.Completer.use_jedi = False
450 450 assert Completion(l, l, comp) not in completions, reason
451 451
452 452 import jedi
453 453
454 454 jedi_version = tuple(int(i) for i in jedi.__version__.split(".")[:3])
455 455 if jedi_version > (0, 10):
456 456 _test_complete("jedi >0.9 should complete and not crash", "a=1;a.", "real")
457 457 _test_complete("can infer first argument", 'a=(1,"foo");a[0].', "real")
458 458 _test_complete("can infer second argument", 'a=(1,"foo");a[1].', "capitalize")
459 459 _test_complete("cover duplicate completions", "im", "import", 0, 2)
460 460
461 461 _test_not_complete("does not mix types", 'a=(1,"foo");a[0].', "capitalize")
462 462
463 463 def test_completion_have_signature(self):
464 464 """
465 465 Let's make sure Jedi is capable of pulling out the signature of the function we are completing.
466 466 """
467 467 ip = get_ipython()
468 468 with provisionalcompleter():
469 469 ip.Completer.use_jedi = True
470 470 completions = ip.Completer.completions("ope", 3)
471 471 c = next(completions) # should be `open`
472 472 ip.Completer.use_jedi = False
473 473 assert "file" in c.signature, "Signature of function was not found by completer"
474 474 assert (
475 475 "encoding" in c.signature
476 476 ), "Signature of function was not found by completer"
477 477
478 478 @pytest.mark.xfail(reason="Known failure on jedi<=0.18.0")
479 479 def test_deduplicate_completions(self):
480 480 """
481 481 Test that completions are correctly deduplicated (even if ranges are not the same)
482 482 """
483 483 ip = get_ipython()
484 484 ip.ex(
485 485 textwrap.dedent(
486 486 """
487 487 class Z:
488 488 zoo = 1
489 489 """
490 490 )
491 491 )
492 492 with provisionalcompleter():
493 493 ip.Completer.use_jedi = True
494 494 l = list(
495 495 _deduplicate_completions("Z.z", ip.Completer.completions("Z.z", 3))
496 496 )
497 497 ip.Completer.use_jedi = False
498 498
499 499 assert len(l) == 1, "Completions (Z.z<tab>) correctly deduplicate: %s " % l
500 500 assert l[0].text == "zoo" # and not `it.accumulate`
501 501
502 502 def test_greedy_completions(self):
503 503 """
504 504 Test the capability of the Greedy completer.
505 505
506 506 Most of the tests here do not really show off the greedy completer; as proof,
507 507 each of the texts below now passes with Jedi. The greedy completer is capable of more.
508 508
509 509 See the :any:`test_dict_key_completion_contexts`
510 510
511 511 """
512 512 ip = get_ipython()
513 513 ip.ex("a=list(range(5))")
514 514 _, c = ip.complete(".", line="a[0].")
515 515 self.assertFalse(".real" in c, "Shouldn't have completed on a[0]: %s" % c)
516 516
517 517 def _(line, cursor_pos, expect, message, completion):
518 518 with greedy_completion(), provisionalcompleter():
519 519 ip.Completer.use_jedi = False
520 520 _, c = ip.complete(".", line=line, cursor_pos=cursor_pos)
521 521 self.assertIn(expect, c, message % c)
522 522
523 523 ip.Completer.use_jedi = True
524 524 with provisionalcompleter():
525 525 completions = ip.Completer.completions(line, cursor_pos)
526 526 self.assertIn(completion, completions)
527 527
528 528 with provisionalcompleter():
529 529 _(
530 530 "a[0].",
531 531 5,
532 532 "a[0].real",
533 533 "Should have completed on a[0].: %s",
534 534 Completion(5, 5, "real"),
535 535 )
536 536 _(
537 537 "a[0].r",
538 538 6,
539 539 "a[0].real",
540 540 "Should have completed on a[0].r: %s",
541 541 Completion(5, 6, "real"),
542 542 )
543 543
544 544 _(
545 545 "a[0].from_",
546 546 10,
547 547 "a[0].from_bytes",
548 548 "Should have completed on a[0].from_: %s",
549 549 Completion(5, 10, "from_bytes"),
550 550 )
551 551
552 552 def test_omit__names(self):
553 553 # also happens to test IPCompleter as a configurable
554 554 ip = get_ipython()
555 555 ip._hidden_attr = 1
556 556 ip._x = {}
557 557 c = ip.Completer
558 558 ip.ex("ip=get_ipython()")
559 559 cfg = Config()
560 560 cfg.IPCompleter.omit__names = 0
561 561 c.update_config(cfg)
562 562 with provisionalcompleter():
563 563 c.use_jedi = False
564 564 s, matches = c.complete("ip.")
565 565 self.assertIn("ip.__str__", matches)
566 566 self.assertIn("ip._hidden_attr", matches)
567 567
568 568 # c.use_jedi = True
569 569 # completions = set(c.completions('ip.', 3))
570 570 # self.assertIn(Completion(3, 3, '__str__'), completions)
571 571 # self.assertIn(Completion(3,3, "_hidden_attr"), completions)
572 572
573 573 cfg = Config()
574 574 cfg.IPCompleter.omit__names = 1
575 575 c.update_config(cfg)
576 576 with provisionalcompleter():
577 577 c.use_jedi = False
578 578 s, matches = c.complete("ip.")
579 579 self.assertNotIn("ip.__str__", matches)
580 580 # self.assertIn('ip._hidden_attr', matches)
581 581
582 582 # c.use_jedi = True
583 583 # completions = set(c.completions('ip.', 3))
584 584 # self.assertNotIn(Completion(3,3,'__str__'), completions)
585 585 # self.assertIn(Completion(3,3, "_hidden_attr"), completions)
586 586
587 587 cfg = Config()
588 588 cfg.IPCompleter.omit__names = 2
589 589 c.update_config(cfg)
590 590 with provisionalcompleter():
591 591 c.use_jedi = False
592 592 s, matches = c.complete("ip.")
593 593 self.assertNotIn("ip.__str__", matches)
594 594 self.assertNotIn("ip._hidden_attr", matches)
595 595
596 596 # c.use_jedi = True
597 597 # completions = set(c.completions('ip.', 3))
598 598 # self.assertNotIn(Completion(3,3,'__str__'), completions)
599 599 # self.assertNotIn(Completion(3,3, "_hidden_attr"), completions)
600 600
601 601 with provisionalcompleter():
602 602 c.use_jedi = False
603 603 s, matches = c.complete("ip._x.")
604 604 self.assertIn("ip._x.keys", matches)
605 605
606 606 # c.use_jedi = True
607 607 # completions = set(c.completions('ip._x.', 6))
608 608 # self.assertIn(Completion(6,6, "keys"), completions)
609 609
610 610 del ip._hidden_attr
611 611 del ip._x
612 612
613 613 def test_limit_to__all__False_ok(self):
614 614 """
615 615 Limit to __all__ is deprecated; once we remove it this test can go away.
616 616 """
617 617 ip = get_ipython()
618 618 c = ip.Completer
619 619 c.use_jedi = False
620 620 ip.ex("class D: x=24")
621 621 ip.ex("d=D()")
622 622 cfg = Config()
623 623 cfg.IPCompleter.limit_to__all__ = False
624 624 c.update_config(cfg)
625 625 s, matches = c.complete("d.")
626 626 self.assertIn("d.x", matches)
627 627
628 628 def test_get__all__entries_ok(self):
629 629 class A:
630 630 __all__ = ["x", 1]
631 631
632 632 words = completer.get__all__entries(A())
633 633 self.assertEqual(words, ["x"])
634 634
635 635 def test_get__all__entries_no__all__ok(self):
636 636 class A:
637 637 pass
638 638
639 639 words = completer.get__all__entries(A())
640 640 self.assertEqual(words, [])
641 641
642 642 def test_func_kw_completions(self):
643 643 ip = get_ipython()
644 644 c = ip.Completer
645 645 c.use_jedi = False
646 646 ip.ex("def myfunc(a=1,b=2): return a+b")
647 647 s, matches = c.complete(None, "myfunc(1,b")
648 648 self.assertIn("b=", matches)
649 649 # Simulate completing with cursor right after b (pos==10):
650 650 s, matches = c.complete(None, "myfunc(1,b)", 10)
651 651 self.assertIn("b=", matches)
652 652 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
653 653 self.assertIn("b=", matches)
654 654 # builtin function
655 655 s, matches = c.complete(None, "min(k, k")
656 656 self.assertIn("key=", matches)
657 657
658 658 def test_default_arguments_from_docstring(self):
659 659 ip = get_ipython()
660 660 c = ip.Completer
661 661 kwd = c._default_arguments_from_docstring("min(iterable[, key=func]) -> value")
662 662 self.assertEqual(kwd, ["key"])
663 663 # with cython type etc
664 664 kwd = c._default_arguments_from_docstring(
665 665 "Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n"
666 666 )
667 667 self.assertEqual(kwd, ["ncall", "resume", "nsplit"])
668 668 # white spaces
669 669 kwd = c._default_arguments_from_docstring(
670 670 "\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n"
671 671 )
672 672 self.assertEqual(kwd, ["ncall", "resume", "nsplit"])
673 673
674 674 def test_line_magics(self):
675 675 ip = get_ipython()
676 676 c = ip.Completer
677 677 s, matches = c.complete(None, "lsmag")
678 678 self.assertIn("%lsmagic", matches)
679 679 s, matches = c.complete(None, "%lsmag")
680 680 self.assertIn("%lsmagic", matches)
681 681
682 682 def test_cell_magics(self):
683 683 from IPython.core.magic import register_cell_magic
684 684
685 685 @register_cell_magic
686 686 def _foo_cellm(line, cell):
687 687 pass
688 688
689 689 ip = get_ipython()
690 690 c = ip.Completer
691 691
692 692 s, matches = c.complete(None, "_foo_ce")
693 693 self.assertIn("%%_foo_cellm", matches)
694 694 s, matches = c.complete(None, "%%_foo_ce")
695 695 self.assertIn("%%_foo_cellm", matches)
696 696
697 697 def test_line_cell_magics(self):
698 698 from IPython.core.magic import register_line_cell_magic
699 699
700 700 @register_line_cell_magic
701 701 def _bar_cellm(line, cell):
702 702 pass
703 703
704 704 ip = get_ipython()
705 705 c = ip.Completer
706 706
707 707 # The policy here is trickier, see comments in completion code. The
708 708 # returned values depend on whether the user passes %% or not explicitly,
709 709 # and this will show a difference if the same name is both a line and cell
710 710 # magic.
711 711 s, matches = c.complete(None, "_bar_ce")
712 712 self.assertIn("%_bar_cellm", matches)
713 713 self.assertIn("%%_bar_cellm", matches)
714 714 s, matches = c.complete(None, "%_bar_ce")
715 715 self.assertIn("%_bar_cellm", matches)
716 716 self.assertIn("%%_bar_cellm", matches)
717 717 s, matches = c.complete(None, "%%_bar_ce")
718 718 self.assertNotIn("%_bar_cellm", matches)
719 719 self.assertIn("%%_bar_cellm", matches)
720 720
721 721 def test_magic_completion_order(self):
722 722 ip = get_ipython()
723 723 c = ip.Completer
724 724
725 725 # Test ordering of line and cell magics.
726 726 text, matches = c.complete("timeit")
727 727 self.assertEqual(matches, ["%timeit", "%%timeit"])
728 728
729 729 def test_magic_completion_shadowing(self):
730 730 ip = get_ipython()
731 731 c = ip.Completer
732 732 c.use_jedi = False
733 733
734 734 # Before importing matplotlib, %matplotlib magic should be the only option.
735 735 text, matches = c.complete("mat")
736 736 self.assertEqual(matches, ["%matplotlib"])
737 737
738 738 # The newly introduced name should shadow the magic.
739 739 ip.run_cell("matplotlib = 1")
740 740 text, matches = c.complete("mat")
741 741 self.assertEqual(matches, ["matplotlib"])
742 742
743 743 # After removing matplotlib from namespace, the magic should again be
744 744 # the only option.
745 745 del ip.user_ns["matplotlib"]
746 746 text, matches = c.complete("mat")
747 747 self.assertEqual(matches, ["%matplotlib"])
748 748
749 749 def test_magic_completion_shadowing_explicit(self):
750 750 """
751 751 If the user tries to complete a shadowed magic, an explicit % prefix should
752 752 still return the completions.
753 753 """
754 754 ip = get_ipython()
755 755 c = ip.Completer
756 756
757 757 # Before importing matplotlib, %matplotlib magic should be the only option.
758 758 text, matches = c.complete("%mat")
759 759 self.assertEqual(matches, ["%matplotlib"])
760 760
761 761 ip.run_cell("matplotlib = 1")
762 762
763 763 # Even with matplotlib shadowing the magic in the namespace, the
764 764 # explicit % prefix should still return only the magic.
765 765 text, matches = c.complete("%mat")
766 766 self.assertEqual(matches, ["%matplotlib"])
767 767
768 768 def test_magic_config(self):
769 769 ip = get_ipython()
770 770 c = ip.Completer
771 771
772 772 s, matches = c.complete(None, "conf")
773 773 self.assertIn("%config", matches)
774 774 s, matches = c.complete(None, "conf")
775 775 self.assertNotIn("AliasManager", matches)
776 776 s, matches = c.complete(None, "config ")
777 777 self.assertIn("AliasManager", matches)
778 778 s, matches = c.complete(None, "%config ")
779 779 self.assertIn("AliasManager", matches)
780 780 s, matches = c.complete(None, "config Ali")
781 781 self.assertListEqual(["AliasManager"], matches)
782 782 s, matches = c.complete(None, "%config Ali")
783 783 self.assertListEqual(["AliasManager"], matches)
784 784 s, matches = c.complete(None, "config AliasManager")
785 785 self.assertListEqual(["AliasManager"], matches)
786 786 s, matches = c.complete(None, "%config AliasManager")
787 787 self.assertListEqual(["AliasManager"], matches)
788 788 s, matches = c.complete(None, "config AliasManager.")
789 789 self.assertIn("AliasManager.default_aliases", matches)
790 790 s, matches = c.complete(None, "%config AliasManager.")
791 791 self.assertIn("AliasManager.default_aliases", matches)
792 792 s, matches = c.complete(None, "config AliasManager.de")
793 793 self.assertListEqual(["AliasManager.default_aliases"], matches)
794 794 s, matches = c.complete(None, "config AliasManager.de")
795 795 self.assertListEqual(["AliasManager.default_aliases"], matches)
796 796
797 797 def test_magic_color(self):
798 798 ip = get_ipython()
799 799 c = ip.Completer
800 800
801 801 s, matches = c.complete(None, "colo")
802 802 self.assertIn("%colors", matches)
803 803 s, matches = c.complete(None, "colo")
804 804 self.assertNotIn("NoColor", matches)
805 805 s, matches = c.complete(None, "%colors") # No trailing space
806 806 self.assertNotIn("NoColor", matches)
807 807 s, matches = c.complete(None, "colors ")
808 808 self.assertIn("NoColor", matches)
809 809 s, matches = c.complete(None, "%colors ")
810 810 self.assertIn("NoColor", matches)
811 811 s, matches = c.complete(None, "colors NoCo")
812 812 self.assertListEqual(["NoColor"], matches)
813 813 s, matches = c.complete(None, "%colors NoCo")
814 814 self.assertListEqual(["NoColor"], matches)
815 815
816 816 def test_match_dict_keys(self):
817 817 """
818 818 Test that match_dict_keys works on a couple of use cases, returns what is
819 819 expected, and does not crash.
820 820 """
821 821 delims = " \t\n`!@#$^&*()=+[{]}\\|;:'\",<>?"
822 822
823 823 keys = ["foo", b"far"]
824 824 assert match_dict_keys(keys, "b'", delims=delims) == ("'", 2, ["far"])
825 825 assert match_dict_keys(keys, "b'f", delims=delims) == ("'", 2, ["far"])
826 826 assert match_dict_keys(keys, 'b"', delims=delims) == ('"', 2, ["far"])
827 827 assert match_dict_keys(keys, 'b"f', delims=delims) == ('"', 2, ["far"])
828 828
829 829 assert match_dict_keys(keys, "'", delims=delims) == ("'", 1, ["foo"])
830 830 assert match_dict_keys(keys, "'f", delims=delims) == ("'", 1, ["foo"])
831 831 assert match_dict_keys(keys, '"', delims=delims) == ('"', 1, ["foo"])
832 832 assert match_dict_keys(keys, '"f', delims=delims) == ('"', 1, ["foo"])
833 833
835 835
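As a usage illustration, a minimal sketch of calling ``match_dict_keys`` directly, assuming it is imported from ``IPython.core.completer`` as this test module does. The return value is the quote character, the offset at which the key text starts inside the typed prefix, and the matching keys:

.. code::

    from IPython.core.completer import match_dict_keys

    delims = " \t\n`!@#$^&*()=+[{]}\\|;:'\",<>?"
    keys = ["foo", b"far"]

    # the prefix b'f selects bytes keys starting with b"f"
    quote, offset, matches = match_dict_keys(keys, "b'f", delims=delims)
    assert (quote, offset, matches) == ("'", 2, ["far"])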
836 836 def test_match_dict_keys_tuple(self):
837 837 """
838 838         Test that match_dict_keys called with an extra prefix works on a couple of
839 839         use cases, returns what is expected, and does not crash.
840 840 """
841 841 delims = " \t\n`!@#$^&*()=+[{]}\\|;:'\",<>?"
842 842
843 843 keys = [("foo", "bar"), ("foo", "oof"), ("foo", b"bar"), ('other', 'test')]
844 844
845 845 # Completion on first key == "foo"
846 846 assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("foo",)) == ("'", 1, ["bar", "oof"])
847 847 assert match_dict_keys(keys, "\"", delims=delims, extra_prefix=("foo",)) == ("\"", 1, ["bar", "oof"])
848 848 assert match_dict_keys(keys, "'o", delims=delims, extra_prefix=("foo",)) == ("'", 1, ["oof"])
849 849 assert match_dict_keys(keys, "\"o", delims=delims, extra_prefix=("foo",)) == ("\"", 1, ["oof"])
850 850 assert match_dict_keys(keys, "b'", delims=delims, extra_prefix=("foo",)) == ("'", 2, ["bar"])
851 851 assert match_dict_keys(keys, "b\"", delims=delims, extra_prefix=("foo",)) == ("\"", 2, ["bar"])
852 852 assert match_dict_keys(keys, "b'b", delims=delims, extra_prefix=("foo",)) == ("'", 2, ["bar"])
853 853 assert match_dict_keys(keys, "b\"b", delims=delims, extra_prefix=("foo",)) == ("\"", 2, ["bar"])
854 854
855 855 # No Completion
856 856 assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("no_foo",)) == ("'", 1, [])
857 857 assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("fo",)) == ("'", 1, [])
858 858
859 859 keys = [('foo1', 'foo2', 'foo3', 'foo4'), ('foo1', 'foo2', 'bar', 'foo4')]
860 860 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1',)) == ("'", 1, ["foo2", "foo2"])
861 861 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2')) == ("'", 1, ["foo3"])
862 862 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2', 'foo3')) == ("'", 1, ["foo4"])
863 863 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2', 'foo3', 'foo4')) == ("'", 1, [])
864 864
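A sketch of the ``extra_prefix`` mechanism in isolation (same import and ``delims`` as the previous sketch): the tuple elements already typed are passed as ``extra_prefix``, and completion continues on the next element of matching tuple keys:

.. code::

    keys = [("foo", "bar"), ("foo", "oof")]
    quote, offset, matches = match_dict_keys(keys, "'o", delims=delims, extra_prefix=("foo",))
    assert (quote, offset, matches) == ("'", 1, ["oof"])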
865 865 def test_dict_key_completion_string(self):
866 866 """Test dictionary key completion for string keys"""
867 867 ip = get_ipython()
868 868 complete = ip.Completer.complete
869 869
870 870 ip.user_ns["d"] = {"abc": None}
871 871
872 872 # check completion at different stages
873 873 _, matches = complete(line_buffer="d[")
874 874 self.assertIn("'abc'", matches)
875 875 self.assertNotIn("'abc']", matches)
876 876
877 877 _, matches = complete(line_buffer="d['")
878 878 self.assertIn("abc", matches)
879 879 self.assertNotIn("abc']", matches)
880 880
881 881 _, matches = complete(line_buffer="d['a")
882 882 self.assertIn("abc", matches)
883 883 self.assertNotIn("abc']", matches)
884 884
885 885 # check use of different quoting
886 886 _, matches = complete(line_buffer='d["')
887 887 self.assertIn("abc", matches)
888 888 self.assertNotIn('abc"]', matches)
889 889
890 890 _, matches = complete(line_buffer='d["a')
891 891 self.assertIn("abc", matches)
892 892 self.assertNotIn('abc"]', matches)
893 893
894 894 # check sensitivity to following context
895 895 _, matches = complete(line_buffer="d[]", cursor_pos=2)
896 896 self.assertIn("'abc'", matches)
897 897
898 898 _, matches = complete(line_buffer="d['']", cursor_pos=3)
899 899 self.assertIn("abc", matches)
900 900 self.assertNotIn("abc'", matches)
901 901 self.assertNotIn("abc']", matches)
902 902
903 903         # check that all matching keys are returned and that non-matching "noise" is not
904 904 ip.user_ns["d"] = {
905 905 "abc": None,
906 906 "abd": None,
907 907 "bad": None,
908 908 object(): None,
909 909 5: None,
910 910 ("abe", None): None,
911 911 (None, "abf"): None
912 912 }
913 913
914 914 _, matches = complete(line_buffer="d['a")
915 915 self.assertIn("abc", matches)
916 916 self.assertIn("abd", matches)
917 917 self.assertNotIn("bad", matches)
918 918 self.assertNotIn("abe", matches)
919 919 self.assertNotIn("abf", matches)
920 920 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
921 921
922 922 # check escaping and whitespace
923 923 ip.user_ns["d"] = {"a\nb": None, "a'b": None, 'a"b': None, "a word": None}
924 924 _, matches = complete(line_buffer="d['a")
925 925 self.assertIn("a\\nb", matches)
926 926 self.assertIn("a\\'b", matches)
927 927 self.assertIn('a"b', matches)
928 928 self.assertIn("a word", matches)
929 929 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
930 930
931 931 # - can complete on non-initial word of the string
932 932 _, matches = complete(line_buffer="d['a w")
933 933 self.assertIn("word", matches)
934 934
935 935 # - understands quote escaping
936 936 _, matches = complete(line_buffer="d['a\\'")
937 937 self.assertIn("b", matches)
938 938
939 939 # - default quoting should work like repr
940 940 _, matches = complete(line_buffer="d[")
941 941 self.assertIn('"a\'b"', matches)
942 942
943 943 # - when opening quote with ", possible to match with unescaped apostrophe
944 944 _, matches = complete(line_buffer="d[\"a'")
945 945 self.assertIn("b", matches)
946 946
947 947         # must not split at delimiters that readline won't split at
948 948 if "-" not in ip.Completer.splitter.delims:
949 949 ip.user_ns["d"] = {"before-after": None}
950 950 _, matches = complete(line_buffer="d['before-af")
951 951 self.assertIn("before-after", matches)
952 952
953 953         # check completion on tuple-of-string keys at different stages - on the first key
954 954 ip.user_ns["d"] = {('foo', 'bar'): None}
955 955 _, matches = complete(line_buffer="d[")
956 956 self.assertIn("'foo'", matches)
957 957 self.assertNotIn("'foo']", matches)
958 958 self.assertNotIn("'bar'", matches)
959 959 self.assertNotIn("foo", matches)
960 960 self.assertNotIn("bar", matches)
961 961
962 962 # - match the prefix
963 963 _, matches = complete(line_buffer="d['f")
964 964 self.assertIn("foo", matches)
965 965 self.assertNotIn("foo']", matches)
966 966 self.assertNotIn('foo"]', matches)
967 967 _, matches = complete(line_buffer="d['foo")
968 968 self.assertIn("foo", matches)
969 969
970 970 # - can complete on second key
971 971 _, matches = complete(line_buffer="d['foo', ")
972 972 self.assertIn("'bar'", matches)
973 973 _, matches = complete(line_buffer="d['foo', 'b")
974 974 self.assertIn("bar", matches)
975 975 self.assertNotIn("foo", matches)
976 976
977 977 # - does not propose missing keys
978 978 _, matches = complete(line_buffer="d['foo', 'f")
979 979 self.assertNotIn("bar", matches)
980 980 self.assertNotIn("foo", matches)
981 981
982 982 # check sensitivity to following context
983 983 _, matches = complete(line_buffer="d['foo',]", cursor_pos=8)
984 984 self.assertIn("'bar'", matches)
985 985 self.assertNotIn("bar", matches)
986 986 self.assertNotIn("'foo'", matches)
987 987 self.assertNotIn("foo", matches)
988 988
989 989 _, matches = complete(line_buffer="d['']", cursor_pos=3)
990 990 self.assertIn("foo", matches)
991 991 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
992 992
993 993 _, matches = complete(line_buffer='d[""]', cursor_pos=3)
994 994 self.assertIn("foo", matches)
995 995 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
996 996
997 997 _, matches = complete(line_buffer='d["foo","]', cursor_pos=9)
998 998 self.assertIn("bar", matches)
999 999 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
1000 1000
1001 1001 _, matches = complete(line_buffer='d["foo",]', cursor_pos=8)
1002 1002 self.assertIn("'bar'", matches)
1003 1003 self.assertNotIn("bar", matches)
1004 1004
1005 1005 # Can complete with longer tuple keys
1006 1006 ip.user_ns["d"] = {('foo', 'bar', 'foobar'): None}
1007 1007
1008 1008 # - can complete second key
1009 1009 _, matches = complete(line_buffer="d['foo', 'b")
1010 1010 self.assertIn("bar", matches)
1011 1011 self.assertNotIn("foo", matches)
1012 1012 self.assertNotIn("foobar", matches)
1013 1013
1014 1014 # - can complete third key
1015 1015 _, matches = complete(line_buffer="d['foo', 'bar', 'fo")
1016 1016 self.assertIn("foobar", matches)
1017 1017 self.assertNotIn("foo", matches)
1018 1018 self.assertNotIn("bar", matches)
1019 1019
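From the user's point of view, the tuple-key case means that after typing the first element of a tuple key, tab on the second element offers the remaining candidates. A minimal programmatic sketch, assuming an active IPython shell:

.. code::

    ip = get_ipython()
    ip.user_ns["d"] = {("foo", "bar"): None}
    _, matches = ip.Completer.complete(line_buffer="d['foo', 'b")
    assert "bar" in matches    # the second element of the tuple key is offered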
1020 1020 def test_dict_key_completion_contexts(self):
1021 1021 """Test expression contexts in which dict key completion occurs"""
1022 1022 ip = get_ipython()
1023 1023 complete = ip.Completer.complete
1024 1024 d = {"abc": None}
1025 1025 ip.user_ns["d"] = d
1026 1026
1027 1027 class C:
1028 1028 data = d
1029 1029
1030 1030 ip.user_ns["C"] = C
1031 1031 ip.user_ns["get"] = lambda: d
1032 1032
1033 1033 def assert_no_completion(**kwargs):
1034 1034 _, matches = complete(**kwargs)
1035 1035 self.assertNotIn("abc", matches)
1036 1036 self.assertNotIn("abc'", matches)
1037 1037 self.assertNotIn("abc']", matches)
1038 1038 self.assertNotIn("'abc'", matches)
1039 1039 self.assertNotIn("'abc']", matches)
1040 1040
1041 1041 def assert_completion(**kwargs):
1042 1042 _, matches = complete(**kwargs)
1043 1043 self.assertIn("'abc'", matches)
1044 1044 self.assertNotIn("'abc']", matches)
1045 1045
1046 1046 # no completion after string closed, even if reopened
1047 1047 assert_no_completion(line_buffer="d['a'")
1048 1048 assert_no_completion(line_buffer='d["a"')
1049 1049 assert_no_completion(line_buffer="d['a' + ")
1050 1050 assert_no_completion(line_buffer="d['a' + '")
1051 1051
1052 1052 # completion in non-trivial expressions
1053 1053 assert_completion(line_buffer="+ d[")
1054 1054 assert_completion(line_buffer="(d[")
1055 1055 assert_completion(line_buffer="C.data[")
1056 1056
1057 1057 # greedy flag
1058 1058 def assert_completion(**kwargs):
1059 1059 _, matches = complete(**kwargs)
1060 1060 self.assertIn("get()['abc']", matches)
1061 1061
1062 1062 assert_no_completion(line_buffer="get()[")
1063 1063 with greedy_completion():
1064 1064 assert_completion(line_buffer="get()[")
1065 1065 assert_completion(line_buffer="get()['")
1066 1066 assert_completion(line_buffer="get()['a")
1067 1067 assert_completion(line_buffer="get()['ab")
1068 1068 assert_completion(line_buffer="get()['abc")
1069 1069
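The greedy path exercised above evaluates the expression in front of the bracket. Outside the test suite this is controlled by the ``IPCompleter.greedy`` option; a hedged sketch, assuming ``d`` and ``get`` are defined as in the test above (``ip.user_ns["get"] = lambda: d``):

.. code::

    ip = get_ipython()
    ip.Completer.greedy = True       # same effect as %config IPCompleter.greedy=True
    _, matches = ip.Completer.complete(line_buffer="get()['")
    assert "get()['abc']" in matches
    ip.Completer.greedy = False      # restore the default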
1070 1070 def test_dict_key_completion_bytes(self):
1071 1071 """Test handling of bytes in dict key completion"""
1072 1072 ip = get_ipython()
1073 1073 complete = ip.Completer.complete
1074 1074
1075 1075 ip.user_ns["d"] = {"abc": None, b"abd": None}
1076 1076
1077 1077 _, matches = complete(line_buffer="d[")
1078 1078 self.assertIn("'abc'", matches)
1079 1079 self.assertIn("b'abd'", matches)
1080 1080
1081 1081 if False: # not currently implemented
1082 1082 _, matches = complete(line_buffer="d[b")
1083 1083 self.assertIn("b'abd'", matches)
1084 1084 self.assertNotIn("b'abc'", matches)
1085 1085
1086 1086 _, matches = complete(line_buffer="d[b'")
1087 1087 self.assertIn("abd", matches)
1088 1088 self.assertNotIn("abc", matches)
1089 1089
1090 1090 _, matches = complete(line_buffer="d[B'")
1091 1091 self.assertIn("abd", matches)
1092 1092 self.assertNotIn("abc", matches)
1093 1093
1094 1094 _, matches = complete(line_buffer="d['")
1095 1095 self.assertIn("abc", matches)
1096 1096 self.assertNotIn("abd", matches)
1097 1097
1098 1098 def test_dict_key_completion_unicode_py3(self):
1099 1099 """Test handling of unicode in dict key completion"""
1100 1100 ip = get_ipython()
1101 1101 complete = ip.Completer.complete
1102 1102
1103 1103 ip.user_ns["d"] = {"a\u05d0": None}
1104 1104
1105 1105 # query using escape
1106 1106 if sys.platform != "win32":
1107 1107 # Known failure on Windows
1108 1108 _, matches = complete(line_buffer="d['a\\u05d0")
1109 1109 self.assertIn("u05d0", matches) # tokenized after \\
1110 1110
1111 1111 # query using character
1112 1112 _, matches = complete(line_buffer="d['a\u05d0")
1113 1113 self.assertIn("a\u05d0", matches)
1114 1114
1115 1115 with greedy_completion():
1116 1116 # query using escape
1117 1117 _, matches = complete(line_buffer="d['a\\u05d0")
1118 1118 self.assertIn("d['a\\u05d0']", matches) # tokenized after \\
1119 1119
1120 1120 # query using character
1121 1121 _, matches = complete(line_buffer="d['a\u05d0")
1122 1122 self.assertIn("d['a\u05d0']", matches)
1123 1123
1124 1124 @dec.skip_without("numpy")
1125 1125 def test_struct_array_key_completion(self):
1126 1126 """Test dict key completion applies to numpy struct arrays"""
1127 1127 import numpy
1128 1128
1129 1129 ip = get_ipython()
1130 1130 complete = ip.Completer.complete
1131 1131 ip.user_ns["d"] = numpy.array([], dtype=[("hello", "f"), ("world", "f")])
1132 1132 _, matches = complete(line_buffer="d['")
1133 1133 self.assertIn("hello", matches)
1134 1134 self.assertIn("world", matches)
1135 1135 # complete on the numpy struct itself
1136 1136 dt = numpy.dtype(
1137 1137 [("my_head", [("my_dt", ">u4"), ("my_df", ">u4")]), ("my_data", ">f4", 5)]
1138 1138 )
1139 1139 x = numpy.zeros(2, dtype=dt)
1140 1140 ip.user_ns["d"] = x[1]
1141 1141 _, matches = complete(line_buffer="d['")
1142 1142 self.assertIn("my_head", matches)
1143 1143 self.assertIn("my_data", matches)
1144 1144 # complete on a nested level
1145 1145 with greedy_completion():
1146 1146 ip.user_ns["d"] = numpy.zeros(2, dtype=dt)
1147 1147 _, matches = complete(line_buffer="d[1]['my_head']['")
1148 1148 self.assertTrue(any(["my_dt" in m for m in matches]))
1149 1149 self.assertTrue(any(["my_df" in m for m in matches]))
1150 1150
1151 1151 @dec.skip_without("pandas")
1152 1152 def test_dataframe_key_completion(self):
1153 1153 """Test dict key completion applies to pandas DataFrames"""
1154 1154 import pandas
1155 1155
1156 1156 ip = get_ipython()
1157 1157 complete = ip.Completer.complete
1158 1158 ip.user_ns["d"] = pandas.DataFrame({"hello": [1], "world": [2]})
1159 1159 _, matches = complete(line_buffer="d['")
1160 1160 self.assertIn("hello", matches)
1161 1161 self.assertIn("world", matches)
1162 1162
1163 1163 def test_dict_key_completion_invalids(self):
1164 1164         """Smoke test for cases that dict key completion can't handle"""
1165 1165 ip = get_ipython()
1166 1166 complete = ip.Completer.complete
1167 1167
1168 1168 ip.user_ns["no_getitem"] = None
1169 1169 ip.user_ns["no_keys"] = []
1170 1170 ip.user_ns["cant_call_keys"] = dict
1171 1171 ip.user_ns["empty"] = {}
1172 1172 ip.user_ns["d"] = {"abc": 5}
1173 1173
1174 1174 _, matches = complete(line_buffer="no_getitem['")
1175 1175 _, matches = complete(line_buffer="no_keys['")
1176 1176 _, matches = complete(line_buffer="cant_call_keys['")
1177 1177 _, matches = complete(line_buffer="empty['")
1178 1178 _, matches = complete(line_buffer="name_error['")
1179 1179 _, matches = complete(line_buffer="d['\\") # incomplete escape
1180 1180
1181 1181 def test_object_key_completion(self):
1182 1182 ip = get_ipython()
1183 1183 ip.user_ns["key_completable"] = KeyCompletable(["qwerty", "qwick"])
1184 1184
1185 1185 _, matches = ip.Completer.complete(line_buffer="key_completable['qw")
1186 1186 self.assertIn("qwerty", matches)
1187 1187 self.assertIn("qwick", matches)
1188 1188
1189 1189 def test_class_key_completion(self):
1190 1190 ip = get_ipython()
1191 1191 NamedInstanceClass("qwerty")
1192 1192 NamedInstanceClass("qwick")
1193 1193 ip.user_ns["named_instance_class"] = NamedInstanceClass
1194 1194
1195 1195 _, matches = ip.Completer.complete(line_buffer="named_instance_class['qw")
1196 1196 self.assertIn("qwerty", matches)
1197 1197 self.assertIn("qwick", matches)
1198 1198
1199 1199 def test_tryimport(self):
1200 1200 """
1201 1201         Test that try_import doesn't crash on a trailing dot, and imports the module before it.
1202 1202 """
1203 1203 from IPython.core.completerlib import try_import
1204 1204
1205 1205 assert try_import("IPython.")
1206 1206
1207 1207 def test_aimport_module_completer(self):
1208 1208 ip = get_ipython()
1209 1209 _, matches = ip.complete("i", "%aimport i")
1210 1210 self.assertIn("io", matches)
1211 1211 self.assertNotIn("int", matches)
1212 1212
1213 1213 def test_nested_import_module_completer(self):
1214 1214 ip = get_ipython()
1215 1215 _, matches = ip.complete(None, "import IPython.co", 17)
1216 1216 self.assertIn("IPython.core", matches)
1217 1217 self.assertNotIn("import IPython.core", matches)
1218 1218 self.assertNotIn("IPython.display", matches)
1219 1219
1220 1220 def test_import_module_completer(self):
1221 1221 ip = get_ipython()
1222 1222 _, matches = ip.complete("i", "import i")
1223 1223 self.assertIn("io", matches)
1224 1224 self.assertNotIn("int", matches)
1225 1225
1226 1226 def test_from_module_completer(self):
1227 1227 ip = get_ipython()
1228 1228 _, matches = ip.complete("B", "from io import B", 16)
1229 1229 self.assertIn("BytesIO", matches)
1230 1230 self.assertNotIn("BaseException", matches)
1231 1231
1232 1232 def test_snake_case_completion(self):
1233 1233 ip = get_ipython()
1234 1234 ip.Completer.use_jedi = False
1235 1235 ip.user_ns["some_three"] = 3
1236 1236 ip.user_ns["some_four"] = 4
1237 1237 _, matches = ip.complete("s_", "print(s_f")
1238 1238 self.assertIn("some_three", matches)
1239 1239 self.assertIn("some_four", matches)
1240 1240
1241 1241 def test_mix_terms(self):
1242 1242 ip = get_ipython()
1243 1243 from textwrap import dedent
1244 1244
1245 1245 ip.Completer.use_jedi = False
1246 1246 ip.ex(
1247 1247 dedent(
1248 1248 """
1249 1249 class Test:
1250 1250 def meth(self, meth_arg1):
1251 1251 print("meth")
1252 1252
1253 1253 def meth_1(self, meth1_arg1, meth1_arg2):
1254 1254 print("meth1")
1255 1255
1256 1256 def meth_2(self, meth2_arg1, meth2_arg2):
1257 1257 print("meth2")
1258 1258 test = Test()
1259 1259 """
1260 1260 )
1261 1261 )
1262 1262 _, matches = ip.complete(None, "test.meth(")
1263 1263 self.assertIn("meth_arg1=", matches)
1264 1264 self.assertNotIn("meth2_arg1=", matches)
1265
1266 def test_percent_symbol_restrict_to_magic_completions(self):
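        """All completions offered for a token starting with % should be magics."""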
1267 ip = get_ipython()
1268 completer = ip.Completer
1269 text = "%a"
1270
1271 with provisionalcompleter():
1272 completer.use_jedi = True
1273 completions = completer.completions(text, len(text))
1274 for c in completions:
1275 self.assertEqual(c.text[0], "%")
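The new test above captures the change this diff introduces: once the typed token starts with ``%``, jedi-backed completion is restricted to magics. A minimal sketch of the same check outside the test class, using the same provisional completion API as the test itself and assuming an active IPython shell:

.. code::

    from IPython.core.completer import provisionalcompleter

    ip = get_ipython()
    with provisionalcompleter():
        ip.Completer.use_jedi = True
        completions = list(ip.Completer.completions("%a", 2))
    assert all(c.text.startswith("%") for c in completions)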