Add snake case autocomplete for https://github.com/ipython/ipython/issues/9420
sagnak -
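This change makes ``Completer.global_matches`` also offer completions for snake_case
names in the user and global namespaces when the typed text matches the initials of
their underscore-separated parts. A minimal sketch of the shortening logic taken from
the diff below, run on a hypothetical name ``my_long_variable``:

>>> import re
>>> snake_case_re = re.compile(r"[^_]+(_[^_]+)+?")
>>> names = ['my_long_variable', 'foo', '__builtins__']
>>> {"_".join([sub[0] for sub in word.split('_')]): word
...  for word in names if snake_case_re.fullmatch(word)}
{'m_l_v': 'my_long_variable'}

With the patch applied, typing ``m_l<tab>`` in such a namespace would therefore offer
``my_long_variable`` in addition to the usual prefix matches; dunder names and names
without underscores are left to the existing matching loop.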
@@ -1,1879 +1,1888 b''
1 1 # encoding: utf-8
2 2 """Completion for IPython.
3 3
4 4 This module started as fork of the rlcompleter module in the Python standard
5 5 library. The original enhancements made to rlcompleter have been sent
6 6 upstream and were accepted as of Python 2.3,
7 7
8 8 This module now supports a wide variety of completion mechanisms, both for
9 9 normal classic Python code, as well as completers for IPython-specific
10 10 syntax like magics.
11 11
12 12 Latex and Unicode completion
13 13 ============================
14 14
15 15 IPython and compatible frontends can not only complete your code, but can also
16 16 help you to input a wide range of characters. In particular we allow you to
17 17 insert a unicode character using the tab completion mechanism.
18 18
19 19 Forward latex/unicode completion
20 20 --------------------------------
21 21
22 22 Forward completion allows you to easily type a unicode character using its latex
23 23 name, or unicode long description. To do so type a backslash followed by the
24 24 relevant name and press tab:
25 25
26 26
27 27 Using latex completion:
28 28
29 29 .. code::
30 30
31 31 \\alpha<tab>
32 32 Ξ±
33 33
34 34 or using unicode completion:
35 35
36 36
37 37 .. code::
38 38
39 39 \\greek small letter alpha<tab>
40 40 Ξ±
41 41
42 42
43 43 Only valid Python identifiers will complete. Combining characters (like arrows or
44 44 dots) are also available; unlike latex they need to be put after their
45 45 counterpart, that is to say, `F\\\\vec<tab>` is correct, not `\\\\vec<tab>F`.
46 46
47 47 Some browsers are known to display combining characters incorrectly.
48 48
49 49 Backward latex completion
50 50 -------------------------
51 51
52 52 It is sometimes challenging to know how to type a character. If you are using
53 53 IPython, or any compatible frontend, you can prepend a backslash to the character
54 54 and press `<tab>` to expand it to its latex form.
55 55
56 56 .. code::
57 57
58 58 \\Ξ±<tab>
59 59 \\alpha
60 60
61 61
62 62 Both forward and backward completions can be deactivated by setting the
63 63 ``Completer.backslash_combining_completions`` option to ``False``.
64 64
65 65
66 66 Experimental
67 67 ============
68 68
69 69 Starting with IPython 6.0, this module can make use of the Jedi library to
70 70 generate completions both using static analysis of the code and by dynamically
71 71 inspecting multiple namespaces. The APIs attached to this new mechanism are
72 72 unstable and will raise unless used in a :any:`provisionalcompleter` context
73 73 manager.
74 74
75 75 You will find that the following are experimental:
76 76
77 77 - :any:`provisionalcompleter`
78 78 - :any:`IPCompleter.completions`
79 79 - :any:`Completion`
80 80 - :any:`rectify_completions`
81 81
82 82 .. note::
83 83
84 84 better name for :any:`rectify_completions` ?
85 85
86 86 We welcome any feedback on these new APIs, and we also encourage you to try this
87 87 module in debug mode (start IPython with ``--Completer.debug=True``) in order
88 88 to have extra logging information if :any:`jedi` is crashing, or if the current
89 89 IPython completer pending deprecations are returning results not yet handled
90 90 by :any:`jedi`.
91 91
92 92 Using Jedi for tab completion allows snippets like the following to work without
93 93 having to execute any code:
94 94
95 95 >>> myvar = ['hello', 42]
96 96 ... myvar[1].bi<tab>
97 97
98 98 Tab completion will be able to infer that ``myvar[1]`` is an integer without
99 99 executing any code, unlike the previously available ``IPCompleter.greedy``
100 100 option.
101 101
102 102 Be sure to update :any:`jedi` to the latest stable version or to try the
103 103 current development version to get better completions.
104 104 """
105 105
106 106 # skip module doctests
107 107 skip_doctest = True
108 108
109 109 # Copyright (c) IPython Development Team.
110 110 # Distributed under the terms of the Modified BSD License.
111 111 #
112 112 # Some of this code originated from rlcompleter in the Python standard library
113 113 # Copyright (C) 2001 Python Software Foundation, www.python.org
114 114
115 115
116 116 import __main__
117 117 import builtins as builtin_mod
118 118 import glob
119 119 import time
120 120 import inspect
121 121 import itertools
122 122 import keyword
123 123 import os
124 124 import re
125 125 import sys
126 126 import unicodedata
127 127 import string
128 128 import warnings
129 129
130 130 from contextlib import contextmanager
131 131 from importlib import import_module
132 132 from typing import Iterator, List
133 133 from types import SimpleNamespace
134 134
135 135 from traitlets.config.configurable import Configurable
136 136 from IPython.core.error import TryNext
137 137 from IPython.core.inputsplitter import ESC_MAGIC
138 138 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
139 139 from IPython.utils import generics
140 140 from IPython.utils.dir2 import dir2, get_real_method
141 141 from IPython.utils.process import arg_split
142 142 from IPython.utils.py3compat import cast_unicode_py2
143 143 from traitlets import Bool, Enum, observe, Int
144 144
145 145 try:
146 146 import jedi
147 147 import jedi.api.helpers
148 148 JEDI_INSTALLED = True
149 149 except ImportError:
150 150 JEDI_INSTALLED = False
151 151 #-----------------------------------------------------------------------------
152 152 # Globals
153 153 #-----------------------------------------------------------------------------
154 154
155 155 # Public API
156 156 __all__ = ['Completer','IPCompleter']
157 157
158 158 if sys.platform == 'win32':
159 159 PROTECTABLES = ' '
160 160 else:
161 161 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
162 162
163 163
164 164 _deprecation_readline_sentinel = object()
165 165
166 166
167 167 class ProvisionalCompleterWarning(FutureWarning):
168 168 """
169 169 Exception raised by an experimental feature in this module.
170 170
171 171 Wrap code in :any:`provisionalcompleter` context manager if you
172 172 are certain you want to use an unstable feature.
173 173 """
174 174 pass
175 175
176 176 warnings.filterwarnings('error', category=ProvisionalCompleterWarning)
177 177
178 178 @contextmanager
179 179 def provisionalcompleter(action='ignore'):
180 180 """
181 181
182 182
183 183 This context manager has to be used in any place where unstable completer
184 184 behavior and API may be called.
185 185
186 186 >>> with provisionalcompleter():
187 187 ... completer.do_experimental_things() # works
188 188
189 189 >>> completer.do_experimental_things() # raises.
190 190
191 191 .. note:: Unstable
192 192
193 193 By using this context manager you agree that the API in use may change
194 194 without warning, and that you won't complain if they do so.
195 195
196 196 You also understand that if the API is not to your liking you should report
197 197 a bug to explain your use case upstream and help improve the API, and will lose
198 198 credibility if you complain after the API is made stable.
199 199
200 200 We'll be happy to get your feedback, feature requests and improvements on
201 201 any of the unstable APIs!
202 202 """
203 203 with warnings.catch_warnings():
204 204 warnings.filterwarnings(action, category=ProvisionalCompleterWarning)
205 205 yield
206 206
207 207
208 208 def has_open_quotes(s):
209 209 """Return whether a string has open quotes.
210 210
211 211 This simply counts whether the number of quote characters of either type in
212 212 the string is odd.
213 213
214 214 Returns
215 215 -------
216 216 If there is an open quote, the quote character is returned. Else, return
217 217 False.
218 218 """
219 219 # We check " first, then ', so complex cases with nested quotes will get
220 220 # the " to take precedence.
221 221 if s.count('"') % 2:
222 222 return '"'
223 223 elif s.count("'") % 2:
224 224 return "'"
225 225 else:
226 226 return False
227 227
228 228
229 229 def protect_filename(s):
230 230 """Escape a string to protect certain characters."""
231 231 if set(s) & set(PROTECTABLES):
232 232 if sys.platform == "win32":
233 233 return '"' + s + '"'
234 234 else:
235 235 return "".join(("\\" + c if c in PROTECTABLES else c) for c in s)
236 236 else:
237 237 return s
238 238
239 239
240 240 def expand_user(path):
241 241 """Expand ``~``-style usernames in strings.
242 242
243 243 This is similar to :func:`os.path.expanduser`, but it computes and returns
244 244 extra information that will be useful if the input was being used in
245 245 computing completions, and you wish to return the completions with the
246 246 original '~' instead of its expanded value.
247 247
248 248 Parameters
249 249 ----------
250 250 path : str
251 251 String to be expanded. If no ~ is present, the output is the same as the
252 252 input.
253 253
254 254 Returns
255 255 -------
256 256 newpath : str
257 257 Result of ~ expansion in the input path.
258 258 tilde_expand : bool
259 259 Whether any expansion was performed or not.
260 260 tilde_val : str
261 261 The value that ~ was replaced with.
262 262 """
263 263 # Default values
264 264 tilde_expand = False
265 265 tilde_val = ''
266 266 newpath = path
267 267
268 268 if path.startswith('~'):
269 269 tilde_expand = True
270 270 rest = len(path)-1
271 271 newpath = os.path.expanduser(path)
272 272 if rest:
273 273 tilde_val = newpath[:-rest]
274 274 else:
275 275 tilde_val = newpath
276 276
277 277 return newpath, tilde_expand, tilde_val
278 278
279 279
280 280 def compress_user(path, tilde_expand, tilde_val):
281 281 """Does the opposite of expand_user, with its outputs.
282 282 """
283 283 if tilde_expand:
284 284 return path.replace(tilde_val, '~')
285 285 else:
286 286 return path
287 287
288 288
289 289 def completions_sorting_key(word):
290 290 """key for sorting completions
291 291
292 292 This does several things:
293 293
294 294 - Lowercase all completions, so they are sorted alphabetically with
295 295 upper and lower case words mingled
296 296 - Demote any completions starting with underscores to the end
297 297 - Insert any %magic and %%cellmagic completions in the alphabetical order
298 298 by their name
299 299 """
300 300 # Case insensitive sort
301 301 word = word.lower()
302 302
303 303 prio1, prio2 = 0, 0
304 304
305 305 if word.startswith('__'):
306 306 prio1 = 2
307 307 elif word.startswith('_'):
308 308 prio1 = 1
309 309
310 310 if word.endswith('='):
311 311 prio1 = -1
312 312
313 313 if word.startswith('%%'):
314 314 # If there's another % in there, this is something else, so leave it alone
315 315 if not "%" in word[2:]:
316 316 word = word[2:]
317 317 prio2 = 2
318 318 elif word.startswith('%'):
319 319 if not "%" in word[1:]:
320 320 word = word[1:]
321 321 prio2 = 1
322 322
323 323 return prio1, word, prio2
324 324
325 325
326 326 class _FakeJediCompletion:
327 327 """
328 328 This is a workaround to communicate to the UI that Jedi has crashed and to
329 329 report a bug. Will be used only if :any:`IPCompleter.debug` is set to true.
330 330
331 331 Added in IPython 6.0 so should likely be removed for 7.0
332 332
333 333 """
334 334
335 335 def __init__(self, name):
336 336
337 337 self.name = name
338 338 self.complete = name
339 339 self.type = 'crashed'
340 340 self.name_with_symbols = name
341 341
342 342 def __repr__(self):
343 343 return '<Fake completion object jedi has crashed>'
344 344
345 345
346 346 class Completion:
347 347 """
348 348 Completion object used and returned by IPython completers.
349 349
350 350 .. warning:: Unstable
351 351
352 352 This function is unstable, API may change without warning.
353 353 It will also raise unless used in the proper context manager.
354 354
355 355 This acts as a middle-ground :any:`Completion` object between the
356 356 :any:`jedi.api.classes.Completion` object and the Prompt Toolkit completion
357 357 object. While Jedi needs a lot of information about the evaluator and how the
358 358 code should be run/inspected, Prompt Toolkit (and other frontends) mostly
359 359 need user-facing information.
360 360 
361 361 - Which range should be replaced by what.
362 362 - Some metadata (like completion type), or meta information to display to
363 363 the user.
364 364
365 365 For debugging purposes we can also store the origin of the completion (``jedi``,
366 366 ``IPython.python_matches``, ``IPython.magics_matches``...).
367 367 """
368 368
369 369 def __init__(self, start: int, end: int, text: str, *, type: str=None, _origin=''):
370 370 warnings.warn("``Completion`` is a provisional API (as of IPython 6.0). "
371 371 "It may change without warnings. "
372 372 "Use in corresponding context manager.",
373 373 category=ProvisionalCompleterWarning, stacklevel=2)
374 374
375 375 self.start = start
376 376 self.end = end
377 377 self.text = text
378 378 self.type = type
379 379 self._origin = _origin
380 380
381 381 def __repr__(self):
382 382 return '<Completion start=%s end=%s text=%r type=%r>' % (self.start, self.end, self.text, self.type or '?')
383 383
384 384 def __eq__(self, other)->bool:
385 385 """
386 386 Equality and hash do not hash the type (as some completers may not be
387 387 able to infer the type), but are used to (partially) de-duplicate
388 388 completions.
389 389 
390 390 Completely de-duplicating completions is a bit trickier than just
391 391 comparing them, as it depends on the surrounding text, which Completions are
392 392 not aware of.
393 393 """
394 394 return self.start == other.start and \
395 395 self.end == other.end and \
396 396 self.text == other.text
397 397
398 398 def __hash__(self):
399 399 return hash((self.start, self.end, self.text))
400 400
401 401
402 402 _IC = Iterator[Completion]
403 403
404 404
405 405 def _deduplicate_completions(text: str, completions: _IC)-> _IC:
406 406 """
407 407 Deduplicate a set of completions.
408 408
409 409 .. warning:: Unstable
410 410
411 411 This function is unstable, API may change without warning.
412 412
413 413 Parameters
414 414 ----------
415 415 text: str
416 416 text that should be completed.
417 417 completions: Iterator[Completion]
418 418 iterator over the completions to deduplicate
419 419
420 420
421 421 Completions coming from multiple sources may be different but end up having
422 422 the same effect when applied to ``text``. If this is the case, this will
423 423 consider completions as equal and only emit the first encountered.
424 424 
425 425 Not folded into `completions()` yet, for debugging purposes and to detect when
426 426 the IPython completer does return things that Jedi does not, but it should be
427 427 at some point.
428 428 """
429 429 completions = list(completions)
430 430 if not completions:
431 431 return
432 432
433 433 new_start = min(c.start for c in completions)
434 434 new_end = max(c.end for c in completions)
435 435
436 436 seen = set()
437 437 for c in completions:
438 438 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
439 439 if new_text not in seen:
440 440 yield c
441 441 seen.add(new_text)
442 442
443 443
444 444 def rectify_completions(text: str, completions: _IC, *, _debug=False)->_IC:
445 445 """
446 446 Rectify a set of completions to all have the same ``start`` and ``end``
447 447
448 448 .. warning:: Unstable
449 449
450 450 This function is unstable, API may change without warning.
451 451 It will also raise unless used in the proper context manager.
452 452
453 453 Parameters
454 454 ----------
455 455 text: str
456 456 text that should be completed.
457 457 completions: Iterator[Completion]
458 458 iterator over the completions to rectify
459 459
460 460
461 461 :any:`jedi.api.classes.Completion` s returned by Jedi may not have the same start and end, though
462 462 the Jupyter Protocol requires them to behave like so. This will readjust
463 463 the completion to have the same ``start`` and ``end`` by padding both
464 464 extremities with surrounding text.
465 465
466 466 During stabilisation this should support a ``_debug`` option to log which
467 467 completions are returned by the IPython completer and not found in Jedi, in
468 468 order to make upstream bug reports.
469 469 """
470 470 warnings.warn("`rectify_completions` is a provisional API (as of IPython 6.0). "
471 471 "It may change without warnings. "
472 472 "Use in corresponding context manager.",
473 473 category=ProvisionalCompleterWarning, stacklevel=2)
474 474
475 475 completions = list(completions)
476 476 if not completions:
477 477 return
478 478 starts = (c.start for c in completions)
479 479 ends = (c.end for c in completions)
480 480
481 481 new_start = min(starts)
482 482 new_end = max(ends)
483 483
484 484 seen_jedi = set()
485 485 seen_python_matches = set()
486 486 for c in completions:
487 487 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
488 488 if c._origin == 'jedi':
489 489 seen_jedi.add(new_text)
490 490 elif c._origin == 'IPCompleter.python_matches':
491 491 seen_python_matches.add(new_text)
492 492 yield Completion(new_start, new_end, new_text, type=c.type, _origin=c._origin)
493 493 diff = seen_python_matches.difference(seen_jedi)
494 494 if diff and _debug:
495 495 print('IPython.python matches have extras:', diff)
496 496
497 497
498 498 if sys.platform == 'win32':
499 499 DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
500 500 else:
501 501 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
502 502
503 503 GREEDY_DELIMS = ' =\r\n'
504 504
505 505
506 506 class CompletionSplitter(object):
507 507 """An object to split an input line in a manner similar to readline.
508 508
509 509 By having our own implementation, we can expose readline-like completion in
510 510 a uniform manner to all frontends. This object only needs to be given the
511 511 line of text to be split and the cursor position on said line, and it
512 512 returns the 'word' to be completed on at the cursor after splitting the
513 513 entire line.
514 514
515 515 What characters are used as splitting delimiters can be controlled by
516 516 setting the ``delims`` attribute (this is a property that internally
517 517 automatically builds the necessary regular expression)"""
518 518
519 519 # Private interface
520 520
521 521 # A string of delimiter characters. The default value makes sense for
522 522 # IPython's most typical usage patterns.
523 523 _delims = DELIMS
524 524
525 525 # The expression (a normal string) to be compiled into a regular expression
526 526 # for actual splitting. We store it as an attribute mostly for ease of
527 527 # debugging, since this type of code can be so tricky to debug.
528 528 _delim_expr = None
529 529
530 530 # The regular expression that does the actual splitting
531 531 _delim_re = None
532 532
533 533 def __init__(self, delims=None):
534 534 delims = CompletionSplitter._delims if delims is None else delims
535 535 self.delims = delims
536 536
537 537 @property
538 538 def delims(self):
539 539 """Return the string of delimiter characters."""
540 540 return self._delims
541 541
542 542 @delims.setter
543 543 def delims(self, delims):
544 544 """Set the delimiters for line splitting."""
545 545 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
546 546 self._delim_re = re.compile(expr)
547 547 self._delims = delims
548 548 self._delim_expr = expr
549 549
550 550 def split_line(self, line, cursor_pos=None):
551 551 """Split a line of text with a cursor at the given position.
552 552 """
553 553 l = line if cursor_pos is None else line[:cursor_pos]
554 554 return self._delim_re.split(l)[-1]
555 555
556 556
557 557
558 558 class Completer(Configurable):
559 559
560 560 greedy = Bool(False,
561 561 help="""Activate greedy completion
562 562 PENDING DEPRECATION. This is now mostly taken care of with Jedi.
563 563
564 564 This will enable completion on elements of lists, results of function calls, etc.,
565 565 but can be unsafe because the code is actually evaluated on TAB.
566 566 """
567 567 ).tag(config=True)
568 568
569 569 use_jedi = Bool(default_value=JEDI_INSTALLED,
570 570 help="Experimental: Use Jedi to generate autocompletions. "
571 571 "Default to True if jedi is installed").tag(config=True)
572 572
573 573 jedi_compute_type_timeout = Int(default_value=400,
574 574 help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
575 575 Set to 0 to stop computing types. A non-zero value lower than 100ms may hurt
576 576 performance by preventing jedi from building its cache.
577 577 """).tag(config=True)
578 578
579 579 debug = Bool(default_value=False,
580 580 help='Enable debug for the Completer. Mostly print extra '
581 581 'information for experimental jedi integration.')\
582 582 .tag(config=True)
583 583
584 584 backslash_combining_completions = Bool(True,
585 585 help="Enable unicode completions, e.g. \\alpha<tab> . "
586 586 "Includes completion of latex commands, unicode names, and expanding "
587 587 "unicode characters back to latex commands.").tag(config=True)
588 588
589 589
590 590
591 591 def __init__(self, namespace=None, global_namespace=None, **kwargs):
592 592 """Create a new completer for the command line.
593 593
594 594 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
595 595
596 596 If unspecified, the default namespace where completions are performed
597 597 is __main__ (technically, __main__.__dict__). Namespaces should be
598 598 given as dictionaries.
599 599
600 600 An optional second namespace can be given. This allows the completer
601 601 to handle cases where both the local and global scopes need to be
602 602 distinguished.
603 603 """
604 604
605 605 # Don't bind to namespace quite yet, but flag whether the user wants a
606 606 # specific namespace or to use __main__.__dict__. This will allow us
607 607 # to bind to __main__.__dict__ at completion time, not now.
608 608 if namespace is None:
609 609 self.use_main_ns = True
610 610 else:
611 611 self.use_main_ns = False
612 612 self.namespace = namespace
613 613
614 614 # The global namespace, if given, can be bound directly
615 615 if global_namespace is None:
616 616 self.global_namespace = {}
617 617 else:
618 618 self.global_namespace = global_namespace
619 619
620 620 super(Completer, self).__init__(**kwargs)
621 621
622 622 def complete(self, text, state):
623 623 """Return the next possible completion for 'text'.
624 624
625 625 This is called successively with state == 0, 1, 2, ... until it
626 626 returns None. The completion should begin with 'text'.
627 627
628 628 """
629 629 if self.use_main_ns:
630 630 self.namespace = __main__.__dict__
631 631
632 632 if state == 0:
633 633 if "." in text:
634 634 self.matches = self.attr_matches(text)
635 635 else:
636 636 self.matches = self.global_matches(text)
637 637 try:
638 638 return self.matches[state]
639 639 except IndexError:
640 640 return None
641 641
642 642 def global_matches(self, text):
643 643 """Compute matches when text is a simple name.
644 644
645 645 Return a list of all keywords, built-in functions and names currently
646 646 defined in self.namespace or self.global_namespace that match.
647 647
648 648 """
649 649 matches = []
650 650 match_append = matches.append
651 651 n = len(text)
652 652 for lst in [keyword.kwlist,
653 653 builtin_mod.__dict__.keys(),
654 654 self.namespace.keys(),
655 655 self.global_namespace.keys()]:
656 656 for word in lst:
657 657 if word[:n] == text and word != "__builtins__":
658 658 match_append(word)
659 # also complete snake_case names from the initials of their parts, e.g. m_l_v -> my_long_variable
660 snake_case_re = re.compile(r"[^_]+(_[^_]+)+?")
661 for lst in [self.namespace.keys(),
662 self.global_namespace.keys()]:
663 shortened = {"_".join([sub[0] for sub in word.split('_')]) : word
664 for word in lst if snake_case_re.fullmatch(word)}
665 for word in shortened.keys():
666 if word[:n] == text and word != "__builtins__":
667 match_append(shortened[word])
659 668 return [cast_unicode_py2(m) for m in matches]
660 669
661 670 def attr_matches(self, text):
662 671 """Compute matches when text contains a dot.
663 672
664 673 Assuming the text is of the form NAME.NAME....[NAME], and is
665 674 evaluatable in self.namespace or self.global_namespace, it will be
666 675 evaluated and its attributes (as revealed by dir()) are used as
667 676 possible completions. (For class instances, class members are
668 677 also considered.)
669 678
670 679 WARNING: this can still invoke arbitrary C code, if an object
671 680 with a __getattr__ hook is evaluated.
672 681
673 682 """
674 683
675 684 # Another option, seems to work great. Catches things like ''.<tab>
676 685 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
677 686
678 687 if m:
679 688 expr, attr = m.group(1, 3)
680 689 elif self.greedy:
681 690 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
682 691 if not m2:
683 692 return []
684 693 expr, attr = m2.group(1,2)
685 694 else:
686 695 return []
687 696
688 697 try:
689 698 obj = eval(expr, self.namespace)
690 699 except:
691 700 try:
692 701 obj = eval(expr, self.global_namespace)
693 702 except:
694 703 return []
695 704
696 705 if self.limit_to__all__ and hasattr(obj, '__all__'):
697 706 words = get__all__entries(obj)
698 707 else:
699 708 words = dir2(obj)
700 709
701 710 try:
702 711 words = generics.complete_object(obj, words)
703 712 except TryNext:
704 713 pass
705 714 except AssertionError:
706 715 raise
707 716 except Exception:
708 717 # Silence errors from completion function
709 718 #raise # dbg
710 719 pass
711 720 # Build match list to return
712 721 n = len(attr)
713 722 return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr ]
714 723
715 724
716 725 def get__all__entries(obj):
717 726 """returns the strings in the __all__ attribute"""
718 727 try:
719 728 words = getattr(obj, '__all__')
720 729 except:
721 730 return []
722 731
723 732 return [cast_unicode_py2(w) for w in words if isinstance(w, str)]
724 733
725 734
726 735 def match_dict_keys(keys: List[str], prefix: str, delims: str):
727 736 """Used by dict_key_matches, matching the prefix to a list of keys
728 737
729 738 Parameters
730 739 ==========
731 740 keys:
732 741 list of keys in dictionary currently being completed.
733 742 prefix:
734 743 Part of the text already typed by the user. e.g. `mydict[b'fo`
735 744 delims:
736 745 String of delimiters to consider when finding the current key.
737 746
738 747 Returns
739 748 =======
740 749
741 750 A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
742 751 ``quote`` being the quote that needs to be used to close the current string,
743 752 ``token_start`` the position where the replacement should start occurring,
744 753 and ``matched`` a list of replacement/completion candidates.
745 754
746 755 """
747 756 if not prefix:
748 757 return None, 0, [repr(k) for k in keys
749 758 if isinstance(k, (str, bytes))]
750 759 quote_match = re.search('["\']', prefix)
751 760 quote = quote_match.group()
752 761 try:
753 762 prefix_str = eval(prefix + quote, {})
754 763 except Exception:
755 764 return None, 0, []
756 765
757 766 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
758 767 token_match = re.search(pattern, prefix, re.UNICODE)
759 768 token_start = token_match.start()
760 769 token_prefix = token_match.group()
761 770
762 771 matched = []
763 772 for key in keys:
764 773 try:
765 774 if not key.startswith(prefix_str):
766 775 continue
767 776 except (AttributeError, TypeError, UnicodeError):
768 777 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
769 778 continue
770 779
771 780 # reformat remainder of key to begin with prefix
772 781 rem = key[len(prefix_str):]
773 782 # force repr wrapped in '
774 783 rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
775 784 if rem_repr.startswith('u') and prefix[0] not in 'uU':
776 785 # Found key is unicode, but prefix is Py2 string.
777 786 # Therefore attempt to interpret key as string.
778 787 try:
779 788 rem_repr = repr(rem.encode('ascii') + '"')
780 789 except UnicodeEncodeError:
781 790 continue
782 791
783 792 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
784 793 if quote == '"':
785 794 # The entered prefix is quoted with ",
786 795 # but the match is quoted with '.
787 796 # A contained " hence needs escaping for comparison:
788 797 rem_repr = rem_repr.replace('"', '\\"')
789 798
790 799 # then reinsert prefix from start of token
791 800 matched.append('%s%s' % (token_prefix, rem_repr))
792 801 return quote, token_start, matched
793 802
794 803
795 804 def cursor_to_position(text:str, line:int, column:int)->int:
796 805 """
797 806
798 807 Convert the (line,column) position of the cursor in text to an offset in a
799 808 string.
800 809
801 810 Parameters
802 811 ----------
803 812
804 813 text : str
805 814 The text in which to calculate the cursor offset
806 815 line : int
807 816 Line of the cursor; 0-indexed
808 817 column : int
809 818 Column of the cursor 0-indexed
810 819
811 820 Return
812 821 ------
813 822 Position of the cursor in ``text``, 0-indexed.
814 823
815 824 See Also
816 825 --------
817 826 position_to_cursor: reciprocal of this function
818 827
819 828 """
820 829 lines = text.split('\n')
821 830 assert line <= len(lines), '{} <= {}'.format(str(line), str(len(lines)))
822 831
823 832 return sum(len(l) + 1 for l in lines[:line]) + column
824 833
825 834 def position_to_cursor(text:str, offset:int)->(int, int):
826 835 """
827 836 Convert the position of the cursor in text (0 indexed) to a line
828 837 number(0-indexed) and a column number (0-indexed) pair
829 838
830 839 Position should be a valid position in ``text``.
831 840
832 841 Parameters
833 842 ----------
834 843
835 844 text : str
836 845 The text in which to calculate the cursor offset
837 846 offset : int
838 847 Position of the cursor in ``text``, 0-indexed.
839 848
840 849 Return
841 850 ------
842 851 (line, column) : (int, int)
843 852 Line of the cursor; 0-indexed, column of the cursor 0-indexed
844 853
845 854
846 855 See Also
847 856 --------
848 857 cursor_to_position : reciprocal of this function
849 858
850 859
851 860 """
852 861
853 862 assert 0 < offset <= len(text) , "0 < %s <= %s" % (offset , len(text))
854 863
855 864 before = text[:offset]
856 865 blines = before.split('\n') # ! splitlines would trim the trailing \n
857 866 line = before.count('\n')
858 867 col = len(blines[-1])
859 868 return line, col
860 869
861 870
862 871 def _safe_isinstance(obj, module, class_name):
863 872 """Checks if obj is an instance of module.class_name if loaded
864 873 """
865 874 return (module in sys.modules and
866 875 isinstance(obj, getattr(import_module(module), class_name)))
867 876
868 877
869 878 def back_unicode_name_matches(text):
870 879 u"""Match unicode characters back to unicode name
871 880
872 881 This does ``β˜ƒ`` -> ``\\snowman``
873 882
874 883 Note that snowman is not a valid python3 combining character but will be expanded,
875 884 though it will not be recombined back to the snowman character by the completion machinery.
876 885 
877 886 This will also not back-complete standard sequences like \\n, \\b ...
878 887
879 888 Used on Python 3 only.
880 889 """
881 890 if len(text)<2:
882 891 return u'', ()
883 892 maybe_slash = text[-2]
884 893 if maybe_slash != '\\':
885 894 return u'', ()
886 895
887 896 char = text[-1]
888 897 # no expand on quote for completion in strings.
889 898 # nor backcomplete standard ascii keys
890 899 if char in string.ascii_letters or char in ['"',"'"]:
891 900 return u'', ()
892 901 try :
893 902 unic = unicodedata.name(char)
894 903 return '\\'+char,['\\'+unic]
895 904 except KeyError:
896 905 pass
897 906 return u'', ()
898 907
899 908 def back_latex_name_matches(text:str):
900 909 """Match latex characters back to unicode name
901 910
902 911 This does ``\\β„΅`` -> ``\\aleph``
903 912
904 913 Used on Python 3 only.
905 914 """
906 915 if len(text)<2:
907 916 return u'', ()
908 917 maybe_slash = text[-2]
909 918 if maybe_slash != '\\':
910 919 return u'', ()
911 920
912 921
913 922 char = text[-1]
914 923 # no expand on quote for completion in strings.
915 924 # nor backcomplete standard ascii keys
916 925 if char in string.ascii_letters or char in ['"',"'"]:
917 926 return u'', ()
918 927 try :
919 928 latex = reverse_latex_symbol[char]
920 929 # '\\' replace the \ as well
921 930 return '\\'+char,[latex]
922 931 except KeyError:
923 932 pass
924 933 return u'', ()
925 934
926 935
927 936 class IPCompleter(Completer):
928 937 """Extension of the completer class with IPython-specific features"""
929 938
930 939 @observe('greedy')
931 940 def _greedy_changed(self, change):
932 941 """update the splitter and readline delims when greedy is changed"""
933 942 if change['new']:
934 943 self.splitter.delims = GREEDY_DELIMS
935 944 else:
936 945 self.splitter.delims = DELIMS
937 946
938 947 merge_completions = Bool(True,
939 948 help="""Whether to merge completion results into a single list
940 949
941 950 If False, only the completion results from the first non-empty
942 951 completer will be returned.
943 952 """
944 953 ).tag(config=True)
945 954 omit__names = Enum((0,1,2), default_value=2,
946 955 help="""Instruct the completer to omit private method names
947 956
948 957 Specifically, when completing on ``object.<tab>``.
949 958
950 959 When 2 [default]: all names that start with '_' will be excluded.
951 960
952 961 When 1: all 'magic' names (``__foo__``) will be excluded.
953 962
954 963 When 0: nothing will be excluded.
955 964 """
956 965 ).tag(config=True)
957 966 limit_to__all__ = Bool(False,
958 967 help="""
959 968 DEPRECATED as of version 5.0.
960 969
961 970 Instruct the completer to use __all__ for the completion
962 971
963 972 Specifically, when completing on ``object.<tab>``.
964 973
965 974 When True: only those names in obj.__all__ will be included.
966 975
967 976 When False [default]: the __all__ attribute is ignored
968 977 """,
969 978 ).tag(config=True)
970 979
971 980 @observe('limit_to__all__')
972 981 def _limit_to_all_changed(self, change):
973 982 warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
974 983 'value has been deprecated since IPython 5.0, will be made to have '
975 984 'no effect and then removed in a future version of IPython.',
976 985 UserWarning)
977 986
978 987 def __init__(self, shell=None, namespace=None, global_namespace=None,
979 988 use_readline=_deprecation_readline_sentinel, config=None, **kwargs):
980 989 """IPCompleter() -> completer
981 990
982 991 Return a completer object.
983 992
984 993 Parameters
985 994 ----------
986 995
987 996 shell
988 997 a pointer to the ipython shell itself. This is needed
989 998 because this completer knows about magic functions, and those can
990 999 only be accessed via the ipython instance.
991 1000
992 1001 namespace : dict, optional
993 1002 an optional dict where completions are performed.
994 1003
995 1004 global_namespace : dict, optional
996 1005 secondary optional dict for completions, to
997 1006 handle cases (such as IPython embedded inside functions) where
998 1007 both Python scopes are visible.
999 1008
1000 1009 use_readline : bool, optional
1001 1010 DEPRECATED, ignored since IPython 6.0, will have no effect
1002 1011 """
1003 1012
1004 1013 self.magic_escape = ESC_MAGIC
1005 1014 self.splitter = CompletionSplitter()
1006 1015
1007 1016 if use_readline is not _deprecation_readline_sentinel:
1008 1017 warnings.warn('The `use_readline` parameter is deprecated and ignored since IPython 6.0.',
1009 1018 DeprecationWarning, stacklevel=2)
1010 1019
1011 1020 # _greedy_changed() depends on splitter and readline being defined:
1012 1021 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
1013 1022 config=config, **kwargs)
1014 1023
1015 1024 # List where completion matches will be stored
1016 1025 self.matches = []
1017 1026 self.shell = shell
1018 1027 # Regexp to split filenames with spaces in them
1019 1028 self.space_name_re = re.compile(r'([^\\] )')
1020 1029 # Hold a local ref. to glob.glob for speed
1021 1030 self.glob = glob.glob
1022 1031
1023 1032 # Determine if we are running on 'dumb' terminals, like (X)Emacs
1024 1033 # buffers, to avoid completion problems.
1025 1034 term = os.environ.get('TERM','xterm')
1026 1035 self.dumb_terminal = term in ['dumb','emacs']
1027 1036
1028 1037 # Special handling of backslashes needed in win32 platforms
1029 1038 if sys.platform == "win32":
1030 1039 self.clean_glob = self._clean_glob_win32
1031 1040 else:
1032 1041 self.clean_glob = self._clean_glob
1033 1042
1034 1043 #regexp to parse docstring for function signature
1035 1044 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1036 1045 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1037 1046 #use this if positional argument name is also needed
1038 1047 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
1039 1048
1040 1049 # All active matcher routines for completion
1041 1050 self.matchers = [
1042 1051 self.python_matches,
1043 1052 self.file_matches,
1044 1053 self.magic_matches,
1045 1054 self.python_func_kw_matches,
1046 1055 self.dict_key_matches,
1047 1056 ]
1048 1057
1049 1058 # This is set externally by InteractiveShell
1050 1059 self.custom_completers = None
1051 1060
1052 1061 def all_completions(self, text):
1053 1062 """
1054 1063 Wrapper around the complete method for the benefit of emacs.
1055 1064 """
1056 1065 return self.complete(text)[1]
1057 1066
1058 1067 def _clean_glob(self, text):
1059 1068 return self.glob("%s*" % text)
1060 1069
1061 1070 def _clean_glob_win32(self,text):
1062 1071 return [f.replace("\\","/")
1063 1072 for f in self.glob("%s*" % text)]
1064 1073
1065 1074 def file_matches(self, text):
1066 1075 """Match filenames, expanding ~USER type strings.
1067 1076
1068 1077 Most of the seemingly convoluted logic in this completer is an
1069 1078 attempt to handle filenames with spaces in them. And yet it's not
1070 1079 quite perfect, because Python's readline doesn't expose all of the
1071 1080 GNU readline details needed for this to be done correctly.
1072 1081
1073 1082 For a filename with a space in it, the printed completions will be
1074 1083 only the parts after what's already been typed (instead of the
1075 1084 full completions, as is normally done). I don't think with the
1076 1085 current (as of Python 2.3) Python readline it's possible to do
1077 1086 better."""
1078 1087
1079 1088 # chars that require escaping with backslash - i.e. chars
1080 1089 # that readline treats incorrectly as delimiters, but we
1081 1090 # don't want to treat as delimiters in filename matching
1082 1091 # when escaped with backslash
1083 1092 if text.startswith('!'):
1084 1093 text = text[1:]
1085 1094 text_prefix = u'!'
1086 1095 else:
1087 1096 text_prefix = u''
1088 1097
1089 1098 text_until_cursor = self.text_until_cursor
1090 1099 # track strings with open quotes
1091 1100 open_quotes = has_open_quotes(text_until_cursor)
1092 1101
1093 1102 if '(' in text_until_cursor or '[' in text_until_cursor:
1094 1103 lsplit = text
1095 1104 else:
1096 1105 try:
1097 1106 # arg_split ~ shlex.split, but with unicode bugs fixed by us
1098 1107 lsplit = arg_split(text_until_cursor)[-1]
1099 1108 except ValueError:
1100 1109 # typically an unmatched ", or backslash without escaped char.
1101 1110 if open_quotes:
1102 1111 lsplit = text_until_cursor.split(open_quotes)[-1]
1103 1112 else:
1104 1113 return []
1105 1114 except IndexError:
1106 1115 # tab pressed on empty line
1107 1116 lsplit = ""
1108 1117
1109 1118 if not open_quotes and lsplit != protect_filename(lsplit):
1110 1119 # if protectables are found, do matching on the whole escaped name
1111 1120 has_protectables = True
1112 1121 text0,text = text,lsplit
1113 1122 else:
1114 1123 has_protectables = False
1115 1124 text = os.path.expanduser(text)
1116 1125
1117 1126 if text == "":
1118 1127 return [text_prefix + cast_unicode_py2(protect_filename(f)) for f in self.glob("*")]
1119 1128
1120 1129 # Compute the matches from the filesystem
1121 1130 if sys.platform == 'win32':
1122 1131 m0 = self.clean_glob(text)
1123 1132 else:
1124 1133 m0 = self.clean_glob(text.replace('\\', ''))
1125 1134
1126 1135 if has_protectables:
1127 1136 # If we had protectables, we need to revert our changes to the
1128 1137 # beginning of filename so that we don't double-write the part
1129 1138 # of the filename we have so far
1130 1139 len_lsplit = len(lsplit)
1131 1140 matches = [text_prefix + text0 +
1132 1141 protect_filename(f[len_lsplit:]) for f in m0]
1133 1142 else:
1134 1143 if open_quotes:
1135 1144 # if we have a string with an open quote, we don't need to
1136 1145 # protect the names at all (and we _shouldn't_, as it
1137 1146 # would cause bugs when the filesystem call is made).
1138 1147 matches = m0
1139 1148 else:
1140 1149 matches = [text_prefix +
1141 1150 protect_filename(f) for f in m0]
1142 1151
1143 1152 # Mark directories in input list by appending '/' to their names.
1144 1153 return [cast_unicode_py2(x+'/') if os.path.isdir(x) else x for x in matches]
1145 1154
1146 1155 def magic_matches(self, text):
1147 1156 """Match magics"""
1148 1157 # Get all shell magics now rather than statically, so magics loaded at
1149 1158 # runtime show up too.
1150 1159 lsm = self.shell.magics_manager.lsmagic()
1151 1160 line_magics = lsm['line']
1152 1161 cell_magics = lsm['cell']
1153 1162 pre = self.magic_escape
1154 1163 pre2 = pre+pre
1155 1164
1156 1165 # Completion logic:
1157 1166 # - user gives %%: only do cell magics
1158 1167 # - user gives %: do both line and cell magics
1159 1168 # - no prefix: do both
1160 1169 # In other words, line magics are skipped if the user gives %% explicitly
1161 1170 bare_text = text.lstrip(pre)
1162 1171 comp = [ pre2+m for m in cell_magics if m.startswith(bare_text)]
1163 1172 if not text.startswith(pre2):
1164 1173 comp += [ pre+m for m in line_magics if m.startswith(bare_text)]
1165 1174 return [cast_unicode_py2(c) for c in comp]
1166 1175
1167 1176 def _jedi_matches(self, cursor_column:int, cursor_line:int, text:str):
1168 1177 """
1169 1178
1170 1179 Return a list of :any:`jedi.api.classes.Completion` objects from a ``text`` and
1171 1180 cursor position.
1172 1181
1173 1182 Parameters
1174 1183 ----------
1175 1184 cursor_column : int
1176 1185 column position of the cursor in ``text``, 0-indexed.
1177 1186 cursor_line : int
1178 1187 line position of the cursor in ``text``, 0-indexed
1179 1188 text : str
1180 1189 text to complete
1181 1190
1182 1191 Debugging
1183 1192 ---------
1184 1193
1185 1194 If ``IPCompleter.debug`` is ``True``, this may return a :any:`_FakeJediCompletion`
1186 1195 object containing a string with the Jedi debug information attached.
1187 1196 """
1188 1197 namespaces = [self.namespace]
1189 1198 if self.global_namespace is not None:
1190 1199 namespaces.append(self.global_namespace)
1191 1200
1192 1201 completion_filter = lambda x:x
1193 1202 # cursor_pos is an int, jedi wants line and column
1194 1203 offset = cursor_to_position(text, cursor_line, cursor_column)
1195 1204 # filter output if we are completing for object members
1196 1205 if offset:
1197 1206 pre = text[offset-1]
1198 1207 if pre == '.':
1199 1208 if self.omit__names == 2:
1200 1209 completion_filter = lambda c:not c.name.startswith('_')
1201 1210 elif self.omit__names == 1:
1202 1211 completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
1203 1212 elif self.omit__names == 0:
1204 1213 completion_filter = lambda x:x
1205 1214 else:
1206 1215 raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
1207 1216
1208 1217 interpreter = jedi.Interpreter(
1209 1218 text, namespaces, column=cursor_column, line=cursor_line + 1)
1210 1219
1211 1220 try_jedi = False
1212 1221
1213 1222 try:
1214 1223 # should we check the type of the node is Error ?
1215 1224 from jedi.parser.tree import ErrorLeaf
1216 1225 next_to_last_tree = interpreter._get_module().tree_node.children[-2]
1217 1226 completing_string = False
1218 1227 if isinstance(next_to_last_tree, ErrorLeaf):
1219 1228 completing_string = interpreter._get_module().tree_node.children[-2].value[0] in {'"', "'"}
1220 1229 # if we are in a string jedi is likely not the right candidate for
1221 1230 # now. Skip it.
1222 1231 try_jedi = not completing_string
1223 1232 except Exception as e:
1224 1233 # many things can go wrong; we are using a private API, just don't crash.
1225 1234 if self.debug:
1226 1235 print("Error detecting if completing a non-finished string :", e, '|')
1227 1236
1228 1237 if not try_jedi:
1229 1238 return []
1230 1239 try:
1231 1240 return filter(completion_filter, interpreter.completions())
1232 1241 except Exception as e:
1233 1242 if self.debug:
1234 1243 return [_FakeJediCompletion('Oops Jedi has crashed, please report a bug with the following:\n"""\n%s\n"""' % (e))]
1235 1244 else:
1236 1245 return []
1237 1246
1238 1247 def python_matches(self, text):
1239 1248 """Match attributes or global python names"""
1240 1249 if "." in text:
1241 1250 try:
1242 1251 matches = self.attr_matches(text)
1243 1252 if text.endswith('.') and self.omit__names:
1244 1253 if self.omit__names == 1:
1245 1254 # true if txt is _not_ a __ name, false otherwise:
1246 1255 no__name = (lambda txt:
1247 1256 re.match(r'.*\.__.*?__',txt) is None)
1248 1257 else:
1249 1258 # true if txt is _not_ a _ name, false otherwise:
1250 1259 no__name = (lambda txt:
1251 1260 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
1252 1261 matches = filter(no__name, matches)
1253 1262 except NameError:
1254 1263 # catches <undefined attributes>.<tab>
1255 1264 matches = []
1256 1265 else:
1257 1266 matches = self.global_matches(text)
1258 1267 return matches
1259 1268
1260 1269 def _default_arguments_from_docstring(self, doc):
1261 1270 """Parse the first line of docstring for call signature.
1262 1271
1263 1272 Docstring should be of the form 'min(iterable[, key=func])\n'.
1264 1273 It can also parse cython docstring of the form
1265 1274 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
1266 1275 """
1267 1276 if doc is None:
1268 1277 return []
1269 1278
1270 1279 #care only about the first line
1271 1280 line = doc.lstrip().splitlines()[0]
1272 1281
1273 1282 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1274 1283 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
1275 1284 sig = self.docstring_sig_re.search(line)
1276 1285 if sig is None:
1277 1286 return []
1278 1287 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
1279 1288 sig = sig.groups()[0].split(',')
1280 1289 ret = []
1281 1290 for s in sig:
1282 1291 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1283 1292 ret += self.docstring_kwd_re.findall(s)
1284 1293 return ret
1285 1294
1286 1295 def _default_arguments(self, obj):
1287 1296 """Return the list of default arguments of obj if it is callable,
1288 1297 or empty list otherwise."""
1289 1298 call_obj = obj
1290 1299 ret = []
1291 1300 if inspect.isbuiltin(obj):
1292 1301 pass
1293 1302 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
1294 1303 if inspect.isclass(obj):
1295 1304 #for cython embedsignature=True the constructor docstring
1296 1305 #belongs to the object itself not __init__
1297 1306 ret += self._default_arguments_from_docstring(
1298 1307 getattr(obj, '__doc__', ''))
1299 1308 # for classes, check for __init__,__new__
1300 1309 call_obj = (getattr(obj, '__init__', None) or
1301 1310 getattr(obj, '__new__', None))
1302 1311 # for all others, check if they are __call__able
1303 1312 elif hasattr(obj, '__call__'):
1304 1313 call_obj = obj.__call__
1305 1314 ret += self._default_arguments_from_docstring(
1306 1315 getattr(call_obj, '__doc__', ''))
1307 1316
1308 1317 _keeps = (inspect.Parameter.KEYWORD_ONLY,
1309 1318 inspect.Parameter.POSITIONAL_OR_KEYWORD)
1310 1319
1311 1320 try:
1312 1321 sig = inspect.signature(call_obj)
1313 1322 ret.extend(k for k, v in sig.parameters.items() if
1314 1323 v.kind in _keeps)
1315 1324 except ValueError:
1316 1325 pass
1317 1326
1318 1327 return list(set(ret))
1319 1328
1320 1329 def python_func_kw_matches(self,text):
1321 1330 """Match named parameters (kwargs) of the last open function"""
1322 1331
1323 1332 if "." in text: # a parameter cannot be dotted
1324 1333 return []
1325 1334 try: regexp = self.__funcParamsRegex
1326 1335 except AttributeError:
1327 1336 regexp = self.__funcParamsRegex = re.compile(r'''
1328 1337 '.*?(?<!\\)' | # single quoted strings or
1329 1338 ".*?(?<!\\)" | # double quoted strings or
1330 1339 \w+ | # identifier
1331 1340 \S # other characters
1332 1341 ''', re.VERBOSE | re.DOTALL)
1333 1342 # 1. find the nearest identifier that comes before an unclosed
1334 1343 # parenthesis before the cursor
1335 1344 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
1336 1345 tokens = regexp.findall(self.text_until_cursor)
1337 1346 iterTokens = reversed(tokens); openPar = 0
1338 1347
1339 1348 for token in iterTokens:
1340 1349 if token == ')':
1341 1350 openPar -= 1
1342 1351 elif token == '(':
1343 1352 openPar += 1
1344 1353 if openPar > 0:
1345 1354 # found the last unclosed parenthesis
1346 1355 break
1347 1356 else:
1348 1357 return []
1349 1358 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
1350 1359 ids = []
1351 1360 isId = re.compile(r'\w+$').match
1352 1361
1353 1362 while True:
1354 1363 try:
1355 1364 ids.append(next(iterTokens))
1356 1365 if not isId(ids[-1]):
1357 1366 ids.pop(); break
1358 1367 if not next(iterTokens) == '.':
1359 1368 break
1360 1369 except StopIteration:
1361 1370 break
1362 1371
1363 1372 # Find all named arguments already assigned to, so as to avoid suggesting
1364 1373 # them again
1365 1374 usedNamedArgs = set()
1366 1375 par_level = -1
1367 1376 for token, next_token in zip(tokens, tokens[1:]):
1368 1377 if token == '(':
1369 1378 par_level += 1
1370 1379 elif token == ')':
1371 1380 par_level -= 1
1372 1381
1373 1382 if par_level != 0:
1374 1383 continue
1375 1384
1376 1385 if next_token != '=':
1377 1386 continue
1378 1387
1379 1388 usedNamedArgs.add(token)
1380 1389
1381 1390 # lookup the candidate callable matches either using global_matches
1382 1391 # or attr_matches for dotted names
1383 1392 if len(ids) == 1:
1384 1393 callableMatches = self.global_matches(ids[0])
1385 1394 else:
1386 1395 callableMatches = self.attr_matches('.'.join(ids[::-1]))
1387 1396 argMatches = []
1388 1397 for callableMatch in callableMatches:
1389 1398 try:
1390 1399 namedArgs = self._default_arguments(eval(callableMatch,
1391 1400 self.namespace))
1392 1401 except:
1393 1402 continue
1394 1403
1395 1404 # Remove used named arguments from the list, no need to show twice
1396 1405 for namedArg in set(namedArgs) - usedNamedArgs:
1397 1406 if namedArg.startswith(text):
1398 1407 argMatches.append(u"%s=" %namedArg)
1399 1408 return argMatches
1400 1409
1401 1410 def dict_key_matches(self, text):
1402 1411 "Match string keys in a dictionary, after e.g. 'foo[' "
1403 1412 def get_keys(obj):
1404 1413 # Objects can define their own completions by defining an
1405 1414 # _ipython_key_completions_() method.
1406 1415 method = get_real_method(obj, '_ipython_key_completions_')
1407 1416 if method is not None:
1408 1417 return method()
1409 1418
1410 1419 # Special case some common in-memory dict-like types
1411 1420 if isinstance(obj, dict) or\
1412 1421 _safe_isinstance(obj, 'pandas', 'DataFrame'):
1413 1422 try:
1414 1423 return list(obj.keys())
1415 1424 except Exception:
1416 1425 return []
1417 1426 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
1418 1427 _safe_isinstance(obj, 'numpy', 'void'):
1419 1428 return obj.dtype.names or []
1420 1429 return []
1421 1430
1422 1431 try:
1423 1432 regexps = self.__dict_key_regexps
1424 1433 except AttributeError:
1425 1434 dict_key_re_fmt = r'''(?x)
1426 1435 ( # match dict-referring expression wrt greedy setting
1427 1436 %s
1428 1437 )
1429 1438 \[ # open bracket
1430 1439 \s* # and optional whitespace
1431 1440 ([uUbB]? # string prefix (r not handled)
1432 1441 (?: # unclosed string
1433 1442 '(?:[^']|(?<!\\)\\')*
1434 1443 |
1435 1444 "(?:[^"]|(?<!\\)\\")*
1436 1445 )
1437 1446 )?
1438 1447 $
1439 1448 '''
1440 1449 regexps = self.__dict_key_regexps = {
1441 1450 False: re.compile(dict_key_re_fmt % '''
1442 1451 # identifiers separated by .
1443 1452 (?!\d)\w+
1444 1453 (?:\.(?!\d)\w+)*
1445 1454 '''),
1446 1455 True: re.compile(dict_key_re_fmt % '''
1447 1456 .+
1448 1457 ''')
1449 1458 }
1450 1459
1451 1460 match = regexps[self.greedy].search(self.text_until_cursor)
1452 1461 if match is None:
1453 1462 return []
1454 1463
1455 1464 expr, prefix = match.groups()
1456 1465 try:
1457 1466 obj = eval(expr, self.namespace)
1458 1467 except Exception:
1459 1468 try:
1460 1469 obj = eval(expr, self.global_namespace)
1461 1470 except Exception:
1462 1471 return []
1463 1472
1464 1473 keys = get_keys(obj)
1465 1474 if not keys:
1466 1475 return keys
1467 1476 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
1468 1477 if not matches:
1469 1478 return matches
1470 1479
1471 1480 # get the cursor position of
1472 1481 # - the text being completed
1473 1482 # - the start of the key text
1474 1483 # - the start of the completion
1475 1484 text_start = len(self.text_until_cursor) - len(text)
1476 1485 if prefix:
1477 1486 key_start = match.start(2)
1478 1487 completion_start = key_start + token_offset
1479 1488 else:
1480 1489 key_start = completion_start = match.end()
1481 1490
1482 1491 # grab the leading prefix, to make sure all completions start with `text`
1483 1492 if text_start > key_start:
1484 1493 leading = ''
1485 1494 else:
1486 1495 leading = text[text_start:completion_start]
1487 1496
1488 1497 # the index of the `[` character
1489 1498 bracket_idx = match.end(1)
1490 1499
1491 1500 # append closing quote and bracket as appropriate
1492 1501 # this is *not* appropriate if the opening quote or bracket is outside
1493 1502 # the text given to this method
1494 1503 suf = ''
1495 1504 continuation = self.line_buffer[len(self.text_until_cursor):]
1496 1505 if key_start > text_start and closing_quote:
1497 1506 # quotes were opened inside text, maybe close them
1498 1507 if continuation.startswith(closing_quote):
1499 1508 continuation = continuation[len(closing_quote):]
1500 1509 else:
1501 1510 suf += closing_quote
1502 1511 if bracket_idx > text_start:
1503 1512 # brackets were opened inside text, maybe close them
1504 1513 if not continuation.startswith(']'):
1505 1514 suf += ']'
1506 1515
1507 1516 return [leading + k + suf for k in matches]
1508 1517
1509 1518 def unicode_name_matches(self, text):
1510 1519 u"""Match Latex-like syntax for unicode characters base
1511 1520 on the name of the character.
1512 1521
1513 1522 This does ``\\GREEK SMALL LETTER ETA`` -> ``Ξ·``
1514 1523
1515 1524 Works only on valid python 3 identifiers, or on combining characters that
1516 1525 will combine to form a valid identifier.
1517 1526
1518 1527 Used on Python 3 only.
1519 1528 """
1520 1529 slashpos = text.rfind('\\')
1521 1530 if slashpos > -1:
1522 1531 s = text[slashpos+1:]
1523 1532 try :
1524 1533 unic = unicodedata.lookup(s)
1525 1534 # allow combining chars
1526 1535 if ('a'+unic).isidentifier():
1527 1536 return '\\'+s,[unic]
1528 1537 except KeyError:
1529 1538 pass
1530 1539 return u'', []
1531 1540
1532 1541
1533 1542 def latex_matches(self, text):
1534 1543 u"""Match Latex syntax for unicode characters.
1535 1544
1536 1545 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``Ξ±``
1537 1546
1538 1547 Used on Python 3 only.
1539 1548 """
1540 1549 slashpos = text.rfind('\\')
1541 1550 if slashpos > -1:
1542 1551 s = text[slashpos:]
1543 1552 if s in latex_symbols:
1544 1553 # Try to complete a full latex symbol to unicode
1545 1554 # \\alpha -> Ξ±
1546 1555 return s, [latex_symbols[s]]
1547 1556 else:
1548 1557 # If a user has partially typed a latex symbol, give them
1549 1558 # a full list of options \al -> [\aleph, \alpha]
1550 1559 matches = [k for k in latex_symbols if k.startswith(s)]
1551 1560 return s, matches
1552 1561 return u'', []
1553 1562
1554 1563 def dispatch_custom_completer(self, text):
1555 1564 if not self.custom_completers:
1556 1565 return
1557 1566
1558 1567 line = self.line_buffer
1559 1568 if not line.strip():
1560 1569 return None
1561 1570
1562 1571 # Create a little structure to pass all the relevant information about
1563 1572 # the current completion to any custom completer.
1564 1573 event = SimpleNamespace()
1565 1574 event.line = line
1566 1575 event.symbol = text
1567 1576 cmd = line.split(None,1)[0]
1568 1577 event.command = cmd
1569 1578 event.text_until_cursor = self.text_until_cursor
1570 1579
1571 1580 # for foo etc, try also to find completer for %foo
1572 1581 if not cmd.startswith(self.magic_escape):
1573 1582 try_magic = self.custom_completers.s_matches(
1574 1583 self.magic_escape + cmd)
1575 1584 else:
1576 1585 try_magic = []
1577 1586
1578 1587 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1579 1588 try_magic,
1580 1589 self.custom_completers.flat_matches(self.text_until_cursor)):
1581 1590 try:
1582 1591 res = c(event)
1583 1592 if res:
1584 1593 # first, try case sensitive match
1585 1594 withcase = [cast_unicode_py2(r) for r in res if r.startswith(text)]
1586 1595 if withcase:
1587 1596 return withcase
1588 1597 # if none, then case insensitive ones are ok too
1589 1598 text_low = text.lower()
1590 1599 return [cast_unicode_py2(r) for r in res if r.lower().startswith(text_low)]
1591 1600 except TryNext:
1592 1601 pass
1593 1602
1594 1603 return None
1595 1604
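For reference, a hedged sketch of how a completer reaching this dispatcher is typically registered; the ``%fruit`` command and the returned strings are purely illustrative, but the ``event`` attributes are exactly the ones assembled above:

from IPython import get_ipython

def fruit_completer(shell, event):
    # event.line, event.symbol, event.command and event.text_until_cursor
    # are the fields set by dispatch_custom_completer above
    return ['apple', 'apricot', 'banana']

get_ipython().set_hook('complete_command', fruit_completer, str_key='%fruit')
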
1596 1605 def completions(self, text: str, offset: int)->Iterator[Completion]:
1597 1606 """
1598 1607 Returns an iterator over the possible completions
1599 1608
1600 1609 .. warning:: Unstable
1601 1610
1602 1611 This function is unstable, the API may change without warning.
1603 1612 It will also raise unless used in the proper context manager.
1604 1613
1605 1614 Parameters
1606 1615 ----------
1607 1616
1608 1617 text:str
1609 1618 Full text of the current input, multi-line string.
1610 1619 offset:int
1611 1620 Integer representing the position of the cursor in ``text``. The
1612 1621 offset is 0-based.
1613 1622
1614 1623 Yields
1615 1624 ------
1616 1625 :any:`Completion` object
1617 1626
1618 1627
1619 1628 The cursor in a text can either be seen as being "in between"
1620 1629 characters or "on" a character, depending on the interface visible to
1621 1630 the user. For consistency, the cursor being "in between" characters X
1622 1631 and Y is equivalent to the cursor being "on" character Y, that is to say
1623 1632 the character the cursor is on is considered as being after the cursor.
1624 1633 
1625 1634 Combining characters may span more than one position in the
1626 1635 text.
1627 1636
1628 1637
1629 1638 .. note::
1630 1639
1631 1640 If ``IPCompleter.debug`` is :any:`True`, this will yield a ``--jedi/ipython--``
1632 1641 fake Completion token to distinguish completions returned by Jedi
1633 1642 from the usual IPython completions.
1634 1643
1635 1644 .. note::
1636 1645
1637 1646 Completions are not completely deduplicated yet. If identical
1638 1647 completions are coming from different sources this function does not
1639 1648 ensure that each completion object will only be present once.
1640 1649 """
1641 1650 warnings.warn("_complete is a provisional API (as of IPython 6.0). "
1642 1651 "It may change without warnings. "
1643 1652 "Use in corresponding context manager.",
1644 1653 category=ProvisionalCompleterWarning, stacklevel=2)
1645 1654
1646 1655 seen = set()
1647 1656 for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
1648 1657 if c and (c in seen):
1649 1658 continue
1650 1659 yield c
1651 1660 seen.add(c)
1652 1661
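A usage sketch of this provisional API (mirroring the tests further down); ``ip`` is assumed to be the running ``InteractiveShell``:

from IPython import get_ipython
from IPython.core.completer import provisionalcompleter

ip = get_ipython()
code = 'myvar = [1, 2]\nmyvar[0].bi'
with provisionalcompleter():  # required, otherwise completions() raises
    for comp in ip.Completer.completions(code, len(code)):
        # comp.start / comp.end are 0-based offsets into `code`
        print(comp.text, comp.start, comp.end, comp.type)
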
1653 1662 def _completions(self, full_text: str, offset: int, *, _timeout)->Iterator[Completion]:
1654 1663 """
1655 1664 Core completion module. Same signature as :any:`completions`, with the
1656 1665 extra `timeout` parameter (in seconds).
1657 1666
1658 1667
1659 1668 Computing jedi's completion ``.type`` can be quite expensive (it is a
1660 1669 lazy property) and can require some warm-up, more warm up than just
1661 1670 computing the ``name`` of a completion. The warm-up can be:
1662 1671 
1663 1672 - Long warm-up the first time a module is encountered after
1664 1673 install/update: actually build parse/inference tree.
1665 1674
1666 1675 - first time the module is encountered in a session: load tree from
1667 1676 disk.
1668 1677
1669 1678 We don't want to block completions for tens of seconds so we give the
1670 1679 completer a "budget" of ``_timeout`` seconds per invocation to compute
1671 1680 completion types; the completions that have not yet been computed will
1672 1681 be marked as "unknown" and will have a chance to be computed in the next
1673 1682 round as things get cached.
1674 1683 
1675 1684 Keep in mind that Jedi is not the only thing treating the completion, so
1676 1685 keep the timeout short-ish: if we take more than 0.3 seconds we still
1677 1686 have lots of processing to do.
1678 1687
1679 1688 """
1680 1689 deadline = time.monotonic() + _timeout
1681 1690
1682 1691
1683 1692 before = full_text[:offset]
1684 1693 cursor_line, cursor_column = position_to_cursor(full_text, offset)
1685 1694
1686 1695 matched_text, matches, matches_origin, jedi_matches = self._complete(
1687 1696 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column)
1688 1697
1689 1698 iter_jm = iter(jedi_matches)
1690 1699 if _timeout:
1691 1700 for jm in iter_jm:
1692 1701 try:
1693 1702 type_ = jm.type
1694 1703 except Exception:
1695 1704 if self.debug:
1696 1705 print("Error in Jedi getting type of ", jm)
1697 1706 type_ = None
1698 1707 delta = len(jm.name_with_symbols) - len(jm.complete)
1699 1708 yield Completion(start=offset - delta,
1700 1709 end=offset,
1701 1710 text=jm.name_with_symbols,
1702 1711 type=type_,
1703 1712 _origin='jedi')
1704 1713
1705 1714 if time.monotonic() > deadline:
1706 1715 break
1707 1716
1708 1717 for jm in iter_jm:
1709 1718 delta = len(jm.name_with_symbols) - len(jm.complete)
1710 1719 yield Completion(start=offset - delta,
1711 1720 end=offset,
1712 1721 text=jm.name_with_symbols,
1713 1722 type='<unknown>', # don't compute type for speed
1714 1723 _origin='jedi')
1715 1724
1716 1725
1717 1726 start_offset = before.rfind(matched_text)
1718 1727
1719 1728 # TODO:
1720 1729 # Suppress this, right now just for debug.
1721 1730 if jedi_matches and matches and self.debug:
1722 1731 yield Completion(start=start_offset, end=offset, text='--jedi/ipython--', _origin='debug')
1723 1732
1724 1733 # I'm unsure if this is always true, so let's assert and see if it
1725 1734 # crashes
1726 1735 assert before.endswith(matched_text)
1727 1736 for m, t in zip(matches, matches_origin):
1728 1737 yield Completion(start=start_offset, end=offset, text=m, _origin=t)
1729 1738
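The budget discussed in the docstring comes from the completer's ``jedi_compute_type_timeout`` setting (in milliseconds, divided by 1000 in :any:`completions` above). A hedged configuration sketch, reusing the same ``update_config`` call the test-suite below relies on:

from traitlets.config import Config
from IPython import get_ipython

cfg = Config()
cfg.IPCompleter.jedi_compute_type_timeout = 200  # milliseconds
cfg.IPCompleter.debug = True  # also emit the --jedi/ipython-- marker completion
get_ipython().Completer.update_config(cfg)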
1730 1739
1731 1740 def complete(self, text=None, line_buffer=None, cursor_pos=None):
1732 1741 """Find completions for the given text and line context.
1733 1742
1734 1743 Note that both the text and the line_buffer are optional, but at least
1735 1744 one of them must be given.
1736 1745
1737 1746 Parameters
1738 1747 ----------
1739 1748 text : string, optional
1740 1749 Text to perform the completion on. If not given, the line buffer
1741 1750 is split using the instance's CompletionSplitter object.
1742 1751
1743 1752 line_buffer : string, optional
1744 1753 If not given, the completer attempts to obtain the current line
1745 1754 buffer via readline. This keyword allows clients which are
1746 1755 requesting text completions in non-readline contexts to inform
1747 1756 the completer of the entire text.
1748 1757
1749 1758 cursor_pos : int, optional
1750 1759 Index of the cursor in the full line buffer. Should be provided by
1751 1760 remote frontends where the kernel has no access to frontend state.
1752 1761
1753 1762 Returns
1754 1763 -------
1755 1764 text : str
1756 1765 Text that was actually used in the completion.
1757 1766
1758 1767 matches : list
1759 1768 A list of completion matches.
1760 1769
1761 1770
1762 1771 .. note::
1763 1772
1764 1773 This API is likely to be deprecated and replaced by
1765 1774 :any:`IPCompleter.completions` in the future.
1766 1775
1767 1776
1768 1777 """
1769 1778 warnings.warn('`Completer.complete` is pending deprecation since '
1770 1779 'IPython 6.0 and will be replaced by `Completer.completions`.',
1771 1780 PendingDeprecationWarning)
1772 1781 # potential todo, FOLD the 3rd throw away argument of _complete
1773 1782 # into the first 2 one.
1774 1783 return self._complete(line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0)[:2]
1775 1784
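A short sketch of this (pending-deprecation) call showing the ``(text, matches)`` return value; it mirrors how the test-suite below drives the completer:

from IPython import get_ipython

ip = get_ipython()
# cursor right after the dot (position 9) in the line buffer
text, matches = ip.Completer.complete(line_buffer='myvar[0].', cursor_pos=9)
# `text` is the fragment that was completed, `matches` the candidate strings
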
1776 1785 def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
1777 1786 full_text=None, return_jedi_results=True) -> (str, List[str], List[object]):
1778 1787 """
1779 1788
1780 1789 Like complete but can also return raw jedi completions as well as the
1781 1790 origin of the completion text. This could (and should) be made much
1782 1791 cleaner but that will be simpler once we drop the old (and stateful)
1783 1792 :any:`complete` API.
1784 1793
1785 1794
1786 1795 With the current provisional API, cursor_pos acts both (depending on the
1787 1796 caller) as the offset in the ``text`` or ``line_buffer``, and as the
1788 1797 ``column`` when passing multiline strings; this could/should be renamed
1789 1798 but would add extra noise.
1790 1799 """
1791 1800
1792 1801 # if the cursor position isn't given, the only sane assumption we can
1793 1802 # make is that it's at the end of the line (the common case)
1794 1803 if cursor_pos is None:
1795 1804 cursor_pos = len(line_buffer) if text is None else len(text)
1796 1805
1797 1806 if self.use_main_ns:
1798 1807 self.namespace = __main__.__dict__
1799 1808
1800 1809 # if text is either None or an empty string, rely on the line buffer
1801 1810 if (not line_buffer) and full_text:
1802 1811 line_buffer = full_text.split('\n')[cursor_line]
1803 1812 if not text:
1804 1813 text = self.splitter.split_line(line_buffer, cursor_pos)
1805 1814
1806 1815 if self.backslash_combining_completions:
1807 1816 # allow deactivation of these on windows.
1808 1817 base_text = text if not line_buffer else line_buffer[:cursor_pos]
1809 1818 latex_text, latex_matches = self.latex_matches(base_text)
1810 1819 if latex_matches:
1811 1820 return latex_text, latex_matches, ['latex_matches']*len(latex_matches), ()
1812 1821 name_text = ''
1813 1822 name_matches = []
1814 1823 for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
1815 1824 name_text, name_matches = meth(base_text)
1816 1825 if name_text:
1817 1826 return name_text, name_matches, [meth.__qualname__]*len(name_matches), {}
1818 1827
1819 1828
1820 1829 # If no line buffer is given, assume the input text is all there was
1821 1830 if line_buffer is None:
1822 1831 line_buffer = text
1823 1832
1824 1833 self.line_buffer = line_buffer
1825 1834 self.text_until_cursor = self.line_buffer[:cursor_pos]
1826 1835
1827 1836 # Start with a clean slate of completions
1828 1837 matches = []
1829 1838 custom_res = self.dispatch_custom_completer(text)
1830 1839 # FIXME: we should extend our api to return a dict with completions for
1831 1840 # different types of objects. The rlcomplete() method could then
1832 1841 # simply collapse the dict into a list for readline, but we'd have
1833 1842 # richer completion semantics in other environments.
1834 1843 completions = ()
1835 1844 if self.use_jedi and return_jedi_results:
1836 1845 if not full_text:
1837 1846 full_text = line_buffer
1838 1847 completions = self._jedi_matches(
1839 1848 cursor_pos, cursor_line, full_text)
1840 1849 if custom_res is not None:
1841 1850 # did custom completers produce something?
1842 1851 matches = [(m, 'custom') for m in custom_res]
1843 1852 else:
1844 1853 # Extend the list of completions with the results of each
1845 1854 # matcher, so we return results to the user from all
1846 1855 # namespaces.
1847 1856 if self.merge_completions:
1848 1857 matches = []
1849 1858 for matcher in self.matchers:
1850 1859 try:
1851 1860 matches.extend([(m, matcher.__qualname__)
1852 1861 for m in matcher(text)])
1853 1862 except:
1854 1863 # Show the ugly traceback if the matcher causes an
1855 1864 # exception, but do NOT crash the kernel!
1856 1865 sys.excepthook(*sys.exc_info())
1857 1866 else:
1858 1867 for matcher in self.matchers:
1859 1868 matches = [(m, matcher.__qualname__)
1860 1869 for m in matcher(text)]
1861 1870 if matches:
1862 1871 break
1863 1872 seen = set()
1864 1873 filtered_matches = set()
1865 1874 for m in matches:
1866 1875 t, c = m
1867 1876 if t not in seen:
1868 1877 filtered_matches.add(m)
1869 1878 seen.add(t)
1870 1879
1871 1880 filtered_matches = sorted(
1872 1881 set(filtered_matches), key=lambda x: completions_sorting_key(x[0]))
1873 1882
1874 1883 matches = [m[0] for m in filtered_matches]
1875 1884 origins = [m[1] for m in filtered_matches]
1876 1885
1877 1886 self.matches = matches
1878 1887
1879 1888 return text, matches, origins, completions
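
For completeness, a sketch of the four values this private entry point returns (internal API, shown only to illustrate the shape consumed by :any:`completions` above; the sample arguments are illustrative):

from IPython import get_ipython

matched_text, matches, origins, jedi_results = get_ipython().Completer._complete(
    cursor_line=0, cursor_pos=2, line_buffer='im', text='im', full_text='im')
# `origins` names the matcher that produced each entry of `matches`;
# `jedi_results` is the (possibly empty) iterable of raw jedi completions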
@@ -1,860 +1,868 b''
1 1 # encoding: utf-8
2 2 """Tests for the IPython tab-completion machinery."""
3 3
4 4 # Copyright (c) IPython Development Team.
5 5 # Distributed under the terms of the Modified BSD License.
6 6
7 7 import os
8 8 import sys
9 9 import textwrap
10 10 import unittest
11 11
12 12 from contextlib import contextmanager
13 13
14 14 import nose.tools as nt
15 15
16 16 from traitlets.config.loader import Config
17 17 from IPython import get_ipython
18 18 from IPython.core import completer
19 19 from IPython.external.decorators import knownfailureif
20 20 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
21 21 from IPython.utils.generics import complete_object
22 22 from IPython.testing import decorators as dec
23 23
24 24 from IPython.core.completer import (
25 25 Completion, provisionalcompleter, match_dict_keys, _deduplicate_completions)
26 26 from nose.tools import assert_in, assert_not_in
27 27
28 28 #-----------------------------------------------------------------------------
29 29 # Test functions
30 30 #-----------------------------------------------------------------------------
31 31
32 32 @contextmanager
33 33 def greedy_completion():
34 34 ip = get_ipython()
35 35 greedy_original = ip.Completer.greedy
36 36 try:
37 37 ip.Completer.greedy = True
38 38 yield
39 39 finally:
40 40 ip.Completer.greedy = greedy_original
41 41
42 42 def test_protect_filename():
43 43 if sys.platform == 'win32':
44 44 pairs = [('abc','abc'),
45 45 (' abc','" abc"'),
46 46 ('a bc','"a bc"'),
47 47 ('a bc','"a bc"'),
48 48 (' bc','" bc"'),
49 49 ]
50 50 else:
51 51 pairs = [('abc','abc'),
52 52 (' abc',r'\ abc'),
53 53 ('a bc',r'a\ bc'),
54 54 ('a bc',r'a\ \ bc'),
55 55 (' bc',r'\ \ bc'),
56 56 # On posix, we also protect parens and other special characters.
57 57 ('a(bc',r'a\(bc'),
58 58 ('a)bc',r'a\)bc'),
59 59 ('a( )bc',r'a\(\ \)bc'),
60 60 ('a[1]bc', r'a\[1\]bc'),
61 61 ('a{1}bc', r'a\{1\}bc'),
62 62 ('a#bc', r'a\#bc'),
63 63 ('a?bc', r'a\?bc'),
64 64 ('a=bc', r'a\=bc'),
65 65 ('a\\bc', r'a\\bc'),
66 66 ('a|bc', r'a\|bc'),
67 67 ('a;bc', r'a\;bc'),
68 68 ('a:bc', r'a\:bc'),
69 69 ("a'bc", r"a\'bc"),
70 70 ('a*bc', r'a\*bc'),
71 71 ('a"bc', r'a\"bc'),
72 72 ('a^bc', r'a\^bc'),
73 73 ('a&bc', r'a\&bc'),
74 74 ]
75 75 # run the actual tests
76 76 for s1, s2 in pairs:
77 77 s1p = completer.protect_filename(s1)
78 78 nt.assert_equal(s1p, s2)
79 79
80 80
81 81 def check_line_split(splitter, test_specs):
82 82 for part1, part2, split in test_specs:
83 83 cursor_pos = len(part1)
84 84 line = part1+part2
85 85 out = splitter.split_line(line, cursor_pos)
86 86 nt.assert_equal(out, split)
87 87
88 88
89 89 def test_line_split():
90 90 """Basic line splitter test with default specs."""
91 91 sp = completer.CompletionSplitter()
92 92 # The format of the test specs is: part1, part2, expected answer. Parts 1
93 93 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
94 94 # was at the end of part1. So an empty part2 represents someone hitting
95 95 # tab at the end of the line, the most common case.
96 96 t = [('run some/scrip', '', 'some/scrip'),
97 97 ('run scripts/er', 'ror.py foo', 'scripts/er'),
98 98 ('echo $HOM', '', 'HOM'),
99 99 ('print sys.pa', '', 'sys.pa'),
100 100 ('print(sys.pa', '', 'sys.pa'),
101 101 ("execfile('scripts/er", '', 'scripts/er'),
102 102 ('a[x.', '', 'x.'),
103 103 ('a[x.', 'y', 'x.'),
104 104 ('cd "some_file/', '', 'some_file/'),
105 105 ]
106 106 check_line_split(sp, t)
107 107 # Ensure splitting works OK with unicode by re-running the tests with
108 108 # all inputs turned into unicode
109 109 check_line_split(sp, [ map(str, p) for p in t] )
110 110
111 111
112 112 def test_custom_completion_error():
113 113 """Test that errors from custom attribute completers are silenced."""
114 114 ip = get_ipython()
115 115 class A(object): pass
116 116 ip.user_ns['a'] = A()
117 117
118 118 @complete_object.when_type(A)
119 119 def complete_A(a, existing_completions):
120 120 raise TypeError("this should be silenced")
121 121
122 122 ip.complete("a.")
123 123
124 124
125 125 def test_unicode_completions():
126 126 ip = get_ipython()
127 127 # Some strings that trigger different types of completion. Check them both
128 128 # in str and unicode forms
129 129 s = ['ru', '%ru', 'cd /', 'floa', 'float(x)/']
130 130 for t in s + list(map(str, s)):
131 131 # We don't need to check exact completion values (they may change
132 132 # depending on the state of the namespace, but at least no exceptions
133 133 # should be thrown and the return value should be a pair of text, list
134 134 # values.
135 135 text, matches = ip.complete(t)
136 136 nt.assert_true(isinstance(text, str))
137 137 nt.assert_true(isinstance(matches, list))
138 138
139 139 def test_latex_completions():
140 140 from IPython.core.latex_symbols import latex_symbols
141 141 import random
142 142 ip = get_ipython()
143 143 # Test some random unicode symbols
144 144 keys = random.sample(latex_symbols.keys(), 10)
145 145 for k in keys:
146 146 text, matches = ip.complete(k)
147 147 nt.assert_equal(len(matches),1)
148 148 nt.assert_equal(text, k)
149 149 nt.assert_equal(matches[0], latex_symbols[k])
150 150 # Test a more complex line
151 151 text, matches = ip.complete(u'print(\\alpha')
152 152 nt.assert_equal(text, u'\\alpha')
153 153 nt.assert_equal(matches[0], latex_symbols['\\alpha'])
154 154 # Test multiple matching latex symbols
155 155 text, matches = ip.complete(u'\\al')
156 156 nt.assert_in('\\alpha', matches)
157 157 nt.assert_in('\\aleph', matches)
158 158
159 159
160 160
161 161
162 162 def test_back_latex_completion():
163 163 ip = get_ipython()
164 164
165 165 # do not return more than 1 match for \beta, only the latex one.
166 166 name, matches = ip.complete('\\β')
167 167 nt.assert_equal(len(matches), 1)
168 168 nt.assert_equal(matches[0], '\\beta')
169 169
170 170 def test_back_unicode_completion():
171 171 ip = get_ipython()
172 172
173 173 name, matches = ip.complete('\\Ⅴ')
174 174 nt.assert_equal(len(matches), 1)
175 175 nt.assert_equal(matches[0], '\\ROMAN NUMERAL FIVE')
176 176
177 177
178 178 def test_forward_unicode_completion():
179 179 ip = get_ipython()
180 180
181 181 name, matches = ip.complete('\\ROMAN NUMERAL FIVE')
182 182 nt.assert_equal(len(matches), 1)
183 183 nt.assert_equal(matches[0], 'Ⅴ')
184 184
185 185 @dec.knownfailureif(sys.platform == 'win32', 'Fails if there is a C:\\j... path')
186 186 def test_no_ascii_back_completion():
187 187 ip = get_ipython()
188 188 with TemporaryWorkingDirectory(): # Avoid any filename completions
189 189 # single ascii letters that don't yet have completions
190 190 for letter in 'jJ' :
191 191 name, matches = ip.complete('\\'+letter)
192 192 nt.assert_equal(matches, [])
193 193
194 194
195 195
196 196
197 197 class CompletionSplitterTestCase(unittest.TestCase):
198 198 def setUp(self):
199 199 self.sp = completer.CompletionSplitter()
200 200
201 201 def test_delim_setting(self):
202 202 self.sp.delims = ' '
203 203 nt.assert_equal(self.sp.delims, ' ')
204 204 nt.assert_equal(self.sp._delim_expr, '[\ ]')
205 205
206 206 def test_spaces(self):
207 207 """Test with only spaces as split chars."""
208 208 self.sp.delims = ' '
209 209 t = [('foo', '', 'foo'),
210 210 ('run foo', '', 'foo'),
211 211 ('run foo', 'bar', 'foo'),
212 212 ]
213 213 check_line_split(self.sp, t)
214 214
215 215
216 216 def test_has_open_quotes1():
217 217 for s in ["'", "'''", "'hi' '"]:
218 218 nt.assert_equal(completer.has_open_quotes(s), "'")
219 219
220 220
221 221 def test_has_open_quotes2():
222 222 for s in ['"', '"""', '"hi" "']:
223 223 nt.assert_equal(completer.has_open_quotes(s), '"')
224 224
225 225
226 226 def test_has_open_quotes3():
227 227 for s in ["''", "''' '''", "'hi' 'ipython'"]:
228 228 nt.assert_false(completer.has_open_quotes(s))
229 229
230 230
231 231 def test_has_open_quotes4():
232 232 for s in ['""', '""" """', '"hi" "ipython"']:
233 233 nt.assert_false(completer.has_open_quotes(s))
234 234
235 235
236 236 @knownfailureif(sys.platform == 'win32', "abspath completions fail on Windows")
237 237 def test_abspath_file_completions():
238 238 ip = get_ipython()
239 239 with TemporaryDirectory() as tmpdir:
240 240 prefix = os.path.join(tmpdir, 'foo')
241 241 suffixes = ['1', '2']
242 242 names = [prefix+s for s in suffixes]
243 243 for n in names:
244 244 open(n, 'w').close()
245 245
246 246 # Check simple completion
247 247 c = ip.complete(prefix)[1]
248 248 nt.assert_equal(c, names)
249 249
250 250 # Now check with a function call
251 251 cmd = 'a = f("%s' % prefix
252 252 c = ip.complete(prefix, cmd)[1]
253 253 comp = [prefix+s for s in suffixes]
254 254 nt.assert_equal(c, comp)
255 255
256 256
257 257 def test_local_file_completions():
258 258 ip = get_ipython()
259 259 with TemporaryWorkingDirectory():
260 260 prefix = './foo'
261 261 suffixes = ['1', '2']
262 262 names = [prefix+s for s in suffixes]
263 263 for n in names:
264 264 open(n, 'w').close()
265 265
266 266 # Check simple completion
267 267 c = ip.complete(prefix)[1]
268 268 nt.assert_equal(c, names)
269 269
270 270 # Now check with a function call
271 271 cmd = 'a = f("%s' % prefix
272 272 c = ip.complete(prefix, cmd)[1]
273 273 comp = set(prefix+s for s in suffixes)
274 274 nt.assert_true(comp.issubset(set(c)))
275 275
276 276
277 277 def test_jedi():
278 278 """
279 279 A couple of issues we had with Jedi
280 280 """
281 281 ip = get_ipython()
282 282
283 283 def _test_complete(reason, s, comp, start=None, end=None):
284 284 l = len(s)
285 285 start = start if start is not None else l
286 286 end = end if end is not None else l
287 287 with provisionalcompleter():
288 288 completions = set(ip.Completer.completions(s, l))
289 289 assert_in(Completion(start, end, comp), completions, reason)
290 290
291 291 def _test_not_complete(reason, s, comp):
292 292 l = len(s)
293 293 with provisionalcompleter():
294 294 completions = set(ip.Completer.completions(s, l))
295 295 assert_not_in(Completion(l, l, comp), completions, reason)
296 296
297 297 import jedi
298 298 jedi_version = tuple(int(i) for i in jedi.__version__.split('.')[:3])
299 299 if jedi_version > (0, 10):
300 300 yield _test_complete, 'jedi >0.9 should complete and not crash', 'a=1;a.', 'real'
301 301 yield _test_complete, 'can infer first argument', 'a=(1,"foo");a[0].', 'real'
302 302 yield _test_complete, 'can infer second argument', 'a=(1,"foo");a[1].', 'capitalize'
303 303 yield _test_complete, 'cover duplicate completions', 'im', 'import', 0, 2
304 304
305 305 yield _test_not_complete, 'does not mix types', 'a=(1,"foo");a[0].', 'capitalize'
306 306
307 307 def test_deduplicate_completions():
308 308 """
309 309 Test that completions are correctly deduplicated (even if ranges are not the same)
310 310 """
311 311 ip = get_ipython()
312 312 ip.ex(textwrap.dedent('''
313 313 class Z:
314 314 zoo = 1
315 315 '''))
316 316 with provisionalcompleter():
317 317 l = list(_deduplicate_completions('Z.z', ip.Completer.completions('Z.z', 3)))
318 318
319 319 assert len(l) == 1, 'Completions (Z.z<tab>) correctly deduplicate: %s ' % l
320 320 assert l[0].text == 'zoo' # and not `it.accumulate`
321 321
322 322
323 323 def test_greedy_completions():
324 324 """
325 325 Test the capability of the Greedy completer.
326 326
327 327 Most of the tests here do not really show off the greedy completer; as proof,
328 328 each of the tests below now passes with Jedi. The greedy completer is capable of more.
329 329
330 330 See the :any:`test_dict_key_completion_contexts`
331 331
332 332 """
333 333 ip = get_ipython()
334 334 ip.ex('a=list(range(5))')
335 335 _,c = ip.complete('.',line='a[0].')
336 336 nt.assert_false('.real' in c,
337 337 "Shouldn't have completed on a[0]: %s"%c)
338 338 with greedy_completion(), provisionalcompleter():
339 339 def _(line, cursor_pos, expect, message, completion):
340 340 _,c = ip.complete('.', line=line, cursor_pos=cursor_pos)
341 341 with provisionalcompleter():
342 342 completions = ip.Completer.completions(line, cursor_pos)
343 343 nt.assert_in(expect, c, message%c)
344 344 nt.assert_in(completion, completions)
345 345
346 346 yield _, 'a[0].', 5, 'a[0].real', "Should have completed on a[0].: %s", Completion(5,5, 'real')
347 347 yield _, 'a[0].r', 6, 'a[0].real', "Should have completed on a[0].r: %s", Completion(5,6, 'real')
348 348
349 349 if sys.version_info > (3, 4):
350 350 yield _, 'a[0].from_', 10, 'a[0].from_bytes', "Should have completed on a[0].from_: %s", Completion(5, 10, 'from_bytes')
351 351
352 352
353 353 def test_omit__names():
354 354 # also happens to test IPCompleter as a configurable
355 355 ip = get_ipython()
356 356 ip._hidden_attr = 1
357 357 ip._x = {}
358 358 c = ip.Completer
359 359 ip.ex('ip=get_ipython()')
360 360 cfg = Config()
361 361 cfg.IPCompleter.omit__names = 0
362 362 c.update_config(cfg)
363 363 with provisionalcompleter():
364 364 s,matches = c.complete('ip.')
365 365 completions = set(c.completions('ip.', 3))
366 366
367 367 nt.assert_in('ip.__str__', matches)
368 368 nt.assert_in(Completion(3, 3, '__str__'), completions)
369 369
370 370 nt.assert_in('ip._hidden_attr', matches)
371 371 nt.assert_in(Completion(3,3, "_hidden_attr"), completions)
372 372
373 373
374 374 cfg = Config()
375 375 cfg.IPCompleter.omit__names = 1
376 376 c.update_config(cfg)
377 377 with provisionalcompleter():
378 378 s,matches = c.complete('ip.')
379 379 completions = set(c.completions('ip.', 3))
380 380
381 381 nt.assert_not_in('ip.__str__', matches)
382 382 nt.assert_not_in(Completion(3,3,'__str__'), completions)
383 383
384 384 # nt.assert_in('ip._hidden_attr', matches)
385 385 nt.assert_in(Completion(3,3, "_hidden_attr"), completions)
386 386
387 387 cfg = Config()
388 388 cfg.IPCompleter.omit__names = 2
389 389 c.update_config(cfg)
390 390 with provisionalcompleter():
391 391 s,matches = c.complete('ip.')
392 392 completions = set(c.completions('ip.', 3))
393 393
394 394 nt.assert_not_in('ip.__str__', matches)
395 395 nt.assert_not_in(Completion(3,3,'__str__'), completions)
396 396
397 397 nt.assert_not_in('ip._hidden_attr', matches)
398 398 nt.assert_not_in(Completion(3,3, "_hidden_attr"), completions)
399 399
400 400 with provisionalcompleter():
401 401 s,matches = c.complete('ip._x.')
402 402 completions = set(c.completions('ip._x.', 6))
403 403
404 404 nt.assert_in('ip._x.keys', matches)
405 405 nt.assert_in(Completion(6,6, "keys"), completions)
406 406
407 407 del ip._hidden_attr
408 408 del ip._x
409 409
410 410
411 411 def test_limit_to__all__False_ok():
412 412 """
413 413 Limit to all is deprecated, once we remove it this test can go away.
414 414 """
415 415 ip = get_ipython()
416 416 c = ip.Completer
417 417 ip.ex('class D: x=24')
418 418 ip.ex('d=D()')
419 419 cfg = Config()
420 420 cfg.IPCompleter.limit_to__all__ = False
421 421 c.update_config(cfg)
422 422 s, matches = c.complete('d.')
423 423 nt.assert_in('d.x', matches)
424 424
425 425
426 426 def test_get__all__entries_ok():
427 427 class A(object):
428 428 __all__ = ['x', 1]
429 429 words = completer.get__all__entries(A())
430 430 nt.assert_equal(words, ['x'])
431 431
432 432
433 433 def test_get__all__entries_no__all__ok():
434 434 class A(object):
435 435 pass
436 436 words = completer.get__all__entries(A())
437 437 nt.assert_equal(words, [])
438 438
439 439
440 440 def test_func_kw_completions():
441 441 ip = get_ipython()
442 442 c = ip.Completer
443 443 ip.ex('def myfunc(a=1,b=2): return a+b')
444 444 s, matches = c.complete(None, 'myfunc(1,b')
445 445 nt.assert_in('b=', matches)
446 446 # Simulate completing with cursor right after b (pos==10):
447 447 s, matches = c.complete(None, 'myfunc(1,b)', 10)
448 448 nt.assert_in('b=', matches)
449 449 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
450 450 nt.assert_in('b=', matches)
451 451 #builtin function
452 452 s, matches = c.complete(None, 'min(k, k')
453 453 nt.assert_in('key=', matches)
454 454
455 455
456 456 def test_default_arguments_from_docstring():
457 457 ip = get_ipython()
458 458 c = ip.Completer
459 459 kwd = c._default_arguments_from_docstring(
460 460 'min(iterable[, key=func]) -> value')
461 461 nt.assert_equal(kwd, ['key'])
462 462 #with cython type etc
463 463 kwd = c._default_arguments_from_docstring(
464 464 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
465 465 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
466 466 #white spaces
467 467 kwd = c._default_arguments_from_docstring(
468 468 '\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n')
469 469 nt.assert_equal(kwd, ['ncall', 'resume', 'nsplit'])
470 470
471 471 def test_line_magics():
472 472 ip = get_ipython()
473 473 c = ip.Completer
474 474 s, matches = c.complete(None, 'lsmag')
475 475 nt.assert_in('%lsmagic', matches)
476 476 s, matches = c.complete(None, '%lsmag')
477 477 nt.assert_in('%lsmagic', matches)
478 478
479 479
480 480 def test_cell_magics():
481 481 from IPython.core.magic import register_cell_magic
482 482
483 483 @register_cell_magic
484 484 def _foo_cellm(line, cell):
485 485 pass
486 486
487 487 ip = get_ipython()
488 488 c = ip.Completer
489 489
490 490 s, matches = c.complete(None, '_foo_ce')
491 491 nt.assert_in('%%_foo_cellm', matches)
492 492 s, matches = c.complete(None, '%%_foo_ce')
493 493 nt.assert_in('%%_foo_cellm', matches)
494 494
495 495
496 496 def test_line_cell_magics():
497 497 from IPython.core.magic import register_line_cell_magic
498 498
499 499 @register_line_cell_magic
500 500 def _bar_cellm(line, cell):
501 501 pass
502 502
503 503 ip = get_ipython()
504 504 c = ip.Completer
505 505
506 506 # The policy here is trickier, see comments in completion code. The
507 507 # returned values depend on whether the user passes %% or not explicitly,
508 508 # and this will show a difference if the same name is both a line and cell
509 509 # magic.
510 510 s, matches = c.complete(None, '_bar_ce')
511 511 nt.assert_in('%_bar_cellm', matches)
512 512 nt.assert_in('%%_bar_cellm', matches)
513 513 s, matches = c.complete(None, '%_bar_ce')
514 514 nt.assert_in('%_bar_cellm', matches)
515 515 nt.assert_in('%%_bar_cellm', matches)
516 516 s, matches = c.complete(None, '%%_bar_ce')
517 517 nt.assert_not_in('%_bar_cellm', matches)
518 518 nt.assert_in('%%_bar_cellm', matches)
519 519
520 520
521 521 def test_magic_completion_order():
522 522
523 523 ip = get_ipython()
524 524 c = ip.Completer
525 525
526 526 # Test ordering of magics and non-magics with the same name
527 527 # We want the non-magic first
528 528
529 529 # Before importing matplotlib, there should only be one option:
530 530
531 531 text, matches = c.complete('mat')
532 532 nt.assert_equal(matches, ["%matplotlib"])
533 533
534 534
535 535 ip.run_cell("matplotlib = 1") # introduce name into namespace
536 536
537 537 # After the import, there should be two options, ordered like this:
538 538 text, matches = c.complete('mat')
539 539 nt.assert_equal(matches, ["matplotlib", "%matplotlib"])
540 540
541 541
542 542 ip.run_cell("timeit = 1") # define a user variable called 'timeit'
543 543
544 544 # Order of user variable and line and cell magics with same name:
545 545 text, matches = c.complete('timeit')
546 546 nt.assert_equal(matches, ["timeit", "%timeit", "%%timeit"])
547 547
548 548 def test_match_dict_keys():
549 549 """
550 550 Test that match_dict_keys works on a couple of use cases, returns what is
551 551 expected, and does not crash
552 552 """
553 553 delims = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
554 554
555 555
556 556 keys = ['foo', b'far']
557 557 assert match_dict_keys(keys, "b'", delims=delims) == ("'", 2 ,['far'])
558 558 assert match_dict_keys(keys, "b'f", delims=delims) == ("'", 2 ,['far'])
559 559 assert match_dict_keys(keys, 'b"', delims=delims) == ('"', 2 ,['far'])
560 560 assert match_dict_keys(keys, 'b"f', delims=delims) == ('"', 2 ,['far'])
561 561
562 562 assert match_dict_keys(keys, "'", delims=delims) == ("'", 1 ,['foo'])
563 563 assert match_dict_keys(keys, "'f", delims=delims) == ("'", 1 ,['foo'])
564 564 assert match_dict_keys(keys, '"', delims=delims) == ('"', 1 ,['foo'])
565 565 assert match_dict_keys(keys, '"f', delims=delims) == ('"', 1 ,['foo'])
566 566
567 567 match_dict_keys
568 568
569 569
570 570 def test_dict_key_completion_string():
571 571 """Test dictionary key completion for string keys"""
572 572 ip = get_ipython()
573 573 complete = ip.Completer.complete
574 574
575 575 ip.user_ns['d'] = {'abc': None}
576 576
577 577 # check completion at different stages
578 578 _, matches = complete(line_buffer="d[")
579 579 nt.assert_in("'abc'", matches)
580 580 nt.assert_not_in("'abc']", matches)
581 581
582 582 _, matches = complete(line_buffer="d['")
583 583 nt.assert_in("abc", matches)
584 584 nt.assert_not_in("abc']", matches)
585 585
586 586 _, matches = complete(line_buffer="d['a")
587 587 nt.assert_in("abc", matches)
588 588 nt.assert_not_in("abc']", matches)
589 589
590 590 # check use of different quoting
591 591 _, matches = complete(line_buffer="d[\"")
592 592 nt.assert_in("abc", matches)
593 593 nt.assert_not_in('abc\"]', matches)
594 594
595 595 _, matches = complete(line_buffer="d[\"a")
596 596 nt.assert_in("abc", matches)
597 597 nt.assert_not_in('abc\"]', matches)
598 598
599 599 # check sensitivity to following context
600 600 _, matches = complete(line_buffer="d[]", cursor_pos=2)
601 601 nt.assert_in("'abc'", matches)
602 602
603 603 _, matches = complete(line_buffer="d['']", cursor_pos=3)
604 604 nt.assert_in("abc", matches)
605 605 nt.assert_not_in("abc'", matches)
606 606 nt.assert_not_in("abc']", matches)
607 607
608 608 # check multiple solutions are correctly returned and that noise is not included
609 609 ip.user_ns['d'] = {'abc': None, 'abd': None, 'bad': None, object(): None,
610 610 5: None}
611 611
612 612 _, matches = complete(line_buffer="d['a")
613 613 nt.assert_in("abc", matches)
614 614 nt.assert_in("abd", matches)
615 615 nt.assert_not_in("bad", matches)
616 616 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
617 617
618 618 # check escaping and whitespace
619 619 ip.user_ns['d'] = {'a\nb': None, 'a\'b': None, 'a"b': None, 'a word': None}
620 620 _, matches = complete(line_buffer="d['a")
621 621 nt.assert_in("a\\nb", matches)
622 622 nt.assert_in("a\\'b", matches)
623 623 nt.assert_in("a\"b", matches)
624 624 nt.assert_in("a word", matches)
625 625 assert not any(m.endswith((']', '"', "'")) for m in matches), matches
626 626
627 627 # - can complete on non-initial word of the string
628 628 _, matches = complete(line_buffer="d['a w")
629 629 nt.assert_in("word", matches)
630 630
631 631 # - understands quote escaping
632 632 _, matches = complete(line_buffer="d['a\\'")
633 633 nt.assert_in("b", matches)
634 634
635 635 # - default quoting should work like repr
636 636 _, matches = complete(line_buffer="d[")
637 637 nt.assert_in("\"a'b\"", matches)
638 638
639 639 # - when opening quote with ", possible to match with unescaped apostrophe
640 640 _, matches = complete(line_buffer="d[\"a'")
641 641 nt.assert_in("b", matches)
642 642
643 643 # need to not split at delims that readline won't split at
644 644 if '-' not in ip.Completer.splitter.delims:
645 645 ip.user_ns['d'] = {'before-after': None}
646 646 _, matches = complete(line_buffer="d['before-af")
647 647 nt.assert_in('before-after', matches)
648 648
649 649 def test_dict_key_completion_contexts():
650 650 """Test expression contexts in which dict key completion occurs"""
651 651 ip = get_ipython()
652 652 complete = ip.Completer.complete
653 653 d = {'abc': None}
654 654 ip.user_ns['d'] = d
655 655
656 656 class C:
657 657 data = d
658 658 ip.user_ns['C'] = C
659 659 ip.user_ns['get'] = lambda: d
660 660
661 661 def assert_no_completion(**kwargs):
662 662 _, matches = complete(**kwargs)
663 663 nt.assert_not_in('abc', matches)
664 664 nt.assert_not_in('abc\'', matches)
665 665 nt.assert_not_in('abc\']', matches)
666 666 nt.assert_not_in('\'abc\'', matches)
667 667 nt.assert_not_in('\'abc\']', matches)
668 668
669 669 def assert_completion(**kwargs):
670 670 _, matches = complete(**kwargs)
671 671 nt.assert_in("'abc'", matches)
672 672 nt.assert_not_in("'abc']", matches)
673 673
674 674 # no completion after string closed, even if reopened
675 675 assert_no_completion(line_buffer="d['a'")
676 676 assert_no_completion(line_buffer="d[\"a\"")
677 677 assert_no_completion(line_buffer="d['a' + ")
678 678 assert_no_completion(line_buffer="d['a' + '")
679 679
680 680 # completion in non-trivial expressions
681 681 assert_completion(line_buffer="+ d[")
682 682 assert_completion(line_buffer="(d[")
683 683 assert_completion(line_buffer="C.data[")
684 684
685 685 # greedy flag
686 686 def assert_completion(**kwargs):
687 687 _, matches = complete(**kwargs)
688 688 nt.assert_in("get()['abc']", matches)
689 689
690 690 assert_no_completion(line_buffer="get()[")
691 691 with greedy_completion():
692 692 assert_completion(line_buffer="get()[")
693 693 assert_completion(line_buffer="get()['")
694 694 assert_completion(line_buffer="get()['a")
695 695 assert_completion(line_buffer="get()['ab")
696 696 assert_completion(line_buffer="get()['abc")
697 697
698 698
699 699
700 700 def test_dict_key_completion_bytes():
701 701 """Test handling of bytes in dict key completion"""
702 702 ip = get_ipython()
703 703 complete = ip.Completer.complete
704 704
705 705 ip.user_ns['d'] = {'abc': None, b'abd': None}
706 706
707 707 _, matches = complete(line_buffer="d[")
708 708 nt.assert_in("'abc'", matches)
709 709 nt.assert_in("b'abd'", matches)
710 710
711 711 if False: # not currently implemented
712 712 _, matches = complete(line_buffer="d[b")
713 713 nt.assert_in("b'abd'", matches)
714 714 nt.assert_not_in("b'abc'", matches)
715 715
716 716 _, matches = complete(line_buffer="d[b'")
717 717 nt.assert_in("abd", matches)
718 718 nt.assert_not_in("abc", matches)
719 719
720 720 _, matches = complete(line_buffer="d[B'")
721 721 nt.assert_in("abd", matches)
722 722 nt.assert_not_in("abc", matches)
723 723
724 724 _, matches = complete(line_buffer="d['")
725 725 nt.assert_in("abc", matches)
726 726 nt.assert_not_in("abd", matches)
727 727
728 728
729 729 def test_dict_key_completion_unicode_py3():
730 730 """Test handling of unicode in dict key completion"""
731 731 ip = get_ipython()
732 732 complete = ip.Completer.complete
733 733
734 734 ip.user_ns['d'] = {u'a\u05d0': None}
735 735
736 736 # query using escape
737 737 if sys.platform != 'win32':
738 738 # Known failure on Windows
739 739 _, matches = complete(line_buffer="d['a\\u05d0")
740 740 nt.assert_in("u05d0", matches) # tokenized after \\
741 741
742 742 # query using character
743 743 _, matches = complete(line_buffer="d['a\u05d0")
744 744 nt.assert_in(u"a\u05d0", matches)
745 745
746 746 with greedy_completion():
747 747 # query using escape
748 748 _, matches = complete(line_buffer="d['a\\u05d0")
749 749 nt.assert_in("d['a\\u05d0']", matches) # tokenized after \\
750 750
751 751 # query using character
752 752 _, matches = complete(line_buffer="d['a\u05d0")
753 753 nt.assert_in(u"d['a\u05d0']", matches)
754 754
755 755
756 756
757 757 @dec.skip_without('numpy')
758 758 def test_struct_array_key_completion():
759 759 """Test dict key completion applies to numpy struct arrays"""
760 760 import numpy
761 761 ip = get_ipython()
762 762 complete = ip.Completer.complete
763 763 ip.user_ns['d'] = numpy.array([], dtype=[('hello', 'f'), ('world', 'f')])
764 764 _, matches = complete(line_buffer="d['")
765 765 nt.assert_in("hello", matches)
766 766 nt.assert_in("world", matches)
767 767 # complete on the numpy struct itself
768 768 dt = numpy.dtype([('my_head', [('my_dt', '>u4'), ('my_df', '>u4')]),
769 769 ('my_data', '>f4', 5)])
770 770 x = numpy.zeros(2, dtype=dt)
771 771 ip.user_ns['d'] = x[1]
772 772 _, matches = complete(line_buffer="d['")
773 773 nt.assert_in("my_head", matches)
774 774 nt.assert_in("my_data", matches)
775 775 # complete on a nested level
776 776 with greedy_completion():
777 777 ip.user_ns['d'] = numpy.zeros(2, dtype=dt)
778 778 _, matches = complete(line_buffer="d[1]['my_head']['")
779 779 nt.assert_true(any(["my_dt" in m for m in matches]))
780 780 nt.assert_true(any(["my_df" in m for m in matches]))
781 781
782 782
783 783 @dec.skip_without('pandas')
784 784 def test_dataframe_key_completion():
785 785 """Test dict key completion applies to pandas DataFrames"""
786 786 import pandas
787 787 ip = get_ipython()
788 788 complete = ip.Completer.complete
789 789 ip.user_ns['d'] = pandas.DataFrame({'hello': [1], 'world': [2]})
790 790 _, matches = complete(line_buffer="d['")
791 791 nt.assert_in("hello", matches)
792 792 nt.assert_in("world", matches)
793 793
794 794
795 795 def test_dict_key_completion_invalids():
796 796 """Smoke test cases dict key completion can't handle"""
797 797 ip = get_ipython()
798 798 complete = ip.Completer.complete
799 799
800 800 ip.user_ns['no_getitem'] = None
801 801 ip.user_ns['no_keys'] = []
802 802 ip.user_ns['cant_call_keys'] = dict
803 803 ip.user_ns['empty'] = {}
804 804 ip.user_ns['d'] = {'abc': 5}
805 805
806 806 _, matches = complete(line_buffer="no_getitem['")
807 807 _, matches = complete(line_buffer="no_keys['")
808 808 _, matches = complete(line_buffer="cant_call_keys['")
809 809 _, matches = complete(line_buffer="empty['")
810 810 _, matches = complete(line_buffer="name_error['")
811 811 _, matches = complete(line_buffer="d['\\") # incomplete escape
812 812
813 813 class KeyCompletable(object):
814 814 def __init__(self, things=()):
815 815 self.things = things
816 816
817 817 def _ipython_key_completions_(self):
818 818 return list(self.things)
819 819
820 820 def test_object_key_completion():
821 821 ip = get_ipython()
822 822 ip.user_ns['key_completable'] = KeyCompletable(['qwerty', 'qwick'])
823 823
824 824 _, matches = ip.Completer.complete(line_buffer="key_completable['qw")
825 825 nt.assert_in('qwerty', matches)
826 826 nt.assert_in('qwick', matches)
827 827
828 828
829 829 def test_tryimport():
830 830 """
831 831 Test that try_import doesn't crash on a trailing dot, and imports the modules before it
832 832 """
833 833 from IPython.core.completerlib import try_import
834 834 assert(try_import("IPython."))
835 835
836 836
837 837 def test_aimport_module_completer():
838 838 ip = get_ipython()
839 839 _, matches = ip.complete('i', '%aimport i')
840 840 nt.assert_in('io', matches)
841 841 nt.assert_not_in('int', matches)
842 842
843 843 def test_nested_import_module_completer():
844 844 ip = get_ipython()
845 845 _, matches = ip.complete(None, 'import IPython.co', 17)
846 846 nt.assert_in('IPython.core', matches)
847 847 nt.assert_not_in('import IPython.core', matches)
848 848 nt.assert_not_in('IPython.display', matches)
849 849
850 850 def test_import_module_completer():
851 851 ip = get_ipython()
852 852 _, matches = ip.complete('i', 'import i')
853 853 nt.assert_in('io', matches)
854 854 nt.assert_not_in('int', matches)
855 855
856 856 def test_from_module_completer():
857 857 ip = get_ipython()
858 858 _, matches = ip.complete('B', 'from io import B', 16)
859 859 nt.assert_in('BytesIO', matches)
860 860 nt.assert_not_in('BaseException', matches)
861
862 def test_snake_case_completion():
863 ip = get_ipython()
864 ip.user_ns['some_three'] = 3
865 ip.user_ns['some_four'] = 4
866 _, matches = ip.complete("s_", "print(s_f")
867 nt.assert_in('some_three', matches)
868 nt.assert_in('some_four', matches)