Fix a couple of type annotations and actual type errors....
Matthias Bussonnier
@@ -1,1933 +1,1933 @@
1 1 # encoding: utf-8
2 2 """Completion for IPython.
3 3
4 4 This module started as fork of the rlcompleter module in the Python standard
5 5 library. The original enhancements made to rlcompleter have been sent
6 6 upstream and were accepted as of Python 2.3,
7 7
8 8 This module now supports a wide variety of completion mechanisms, both
9 9 for normal classic Python code and for IPython-specific
10 10 syntax like magics.
11 11
12 12 Latex and Unicode completion
13 13 ============================
14 14
15 15 IPython and compatible frontends can not only complete your code, but also help
16 16 you to input a wide range of characters. In particular, we allow you to insert
17 17 a unicode character using the tab completion mechanism.
18 18
19 19 Forward latex/unicode completion
20 20 --------------------------------
21 21
22 22 Forward completion allows you to easily type a unicode character using its latex
23 23 name, or its long unicode description. To do so, type a backslash followed by the
24 24 relevant name and press tab:
25 25
26 26
27 27 Using latex completion:
28 28
29 29 .. code::
30 30
31 31 \\alpha<tab>
32 32 Ξ±
33 33
34 34 or using unicode completion:
35 35
36 36
37 37 .. code::
38 38
39 39 \\greek small letter alpha<tab>
40 40 Ξ±
41 41
42 42
43 43 Only valid Python identifiers will complete. Combining characters (like arrows or
44 44 dots) are also available; unlike in latex, they need to be put after their
45 45 counterpart, that is to say, `F\\\\vec<tab>` is correct, not `\\\\vec<tab>F`.
46 46
47 47 Some browsers are known to display combining characters incorrectly.
48 48
49 49 Backward latex completion
50 50 -------------------------
51 51
52 52 It is sometimes challenging to know how to type a character. If you are using
53 53 IPython, or any compatible frontend, you can prepend a backslash to the character
54 54 and press `<tab>` to expand it to its latex form.
55 55
56 56 .. code::
57 57
58 58 \\Ξ±<tab>
59 59 \\alpha
60 60
61 61
62 62 Both forward and backward completions can be deactivated by setting the
63 63 ``Completer.backslash_combining_completions`` option to ``False``.
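
For example, a minimal sketch of turning both off from an ``ipython_config.py``
file (using the standard traitlets configuration mechanism; adjust to your setup):

.. code::

    c.Completer.backslash_combining_completions = False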
64 64
65 65
66 66 Experimental
67 67 ============
68 68
69 69 Starting with IPython 6.0, this module can make use of the Jedi library to
70 70 generate completions both using static analysis of the code, and dynamically
71 71 inspecting multiple namespaces. The APIs attached to this new mechanism are
72 72 unstable and will raise unless used in a :any:`provisionalcompleter` context
73 73 manager.
74 74
75 75 You will find that the following are experimental:
76 76
77 77 - :any:`provisionalcompleter`
78 78 - :any:`IPCompleter.completions`
79 79 - :any:`Completion`
80 80 - :any:`rectify_completions`
81 81
82 82 .. note::
83 83
84 84 better name for :any:`rectify_completions` ?
85 85
86 86 We welcome any feedback on these new APIs, and we also encourage you to try this
87 87 module in debug mode (start IPython with ``--Completer.debug=True``) in order
88 88 to have extra logging information if :any:`jedi` is crashing, or if the current
89 89 IPython completer's pending deprecations are returning results not yet handled
90 90 by :any:`jedi`.
91 91
92 92 Using Jedi for tab completion allows snippets like the following to work without
93 93 having to execute any code:
94 94
95 95 >>> myvar = ['hello', 42]
96 96 ... myvar[1].bi<tab>
97 97
98 98 Tab completion will be able to infer that ``myvar[1]`` is a real number without
99 99 executing any code, unlike the previously available ``IPCompleter.greedy``
100 100 option.
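
For example, the experimental API can be exercised as follows (a minimal
sketch, assuming an interactive session where ``get_ipython()`` is available):

>>> from IPython.core.completer import provisionalcompleter
... ip = get_ipython()
... with provisionalcompleter():
...     completions = list(ip.Completer.completions('myvar[1].bi', 11))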
101 101
102 102 Be sure to update :any:`jedi` to the latest stable version or to try the
103 103 current development version to get better completions.
104 104 """
105 105
106 106 # skip module doctests
107 107 skip_doctest = True
108 108
109 109 # Copyright (c) IPython Development Team.
110 110 # Distributed under the terms of the Modified BSD License.
111 111 #
112 112 # Some of this code originated from rlcompleter in the Python standard library
113 113 # Copyright (C) 2001 Python Software Foundation, www.python.org
114 114
115 115
116 116 import __main__
117 117 import builtins as builtin_mod
118 118 import glob
119 119 import time
120 120 import inspect
121 121 import itertools
122 122 import keyword
123 123 import os
124 124 import re
125 125 import sys
126 126 import unicodedata
127 127 import string
128 128 import warnings
129 129
130 130 from contextlib import contextmanager
131 131 from importlib import import_module
132 from typing import Iterator, List
132 from typing import Iterator, List, Tuple, Iterable, Union
133 133 from types import SimpleNamespace
134 134
135 135 from traitlets.config.configurable import Configurable
136 136 from IPython.core.error import TryNext
137 137 from IPython.core.inputsplitter import ESC_MAGIC
138 138 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
139 139 from IPython.utils import generics
140 140 from IPython.utils.dir2 import dir2, get_real_method
141 141 from IPython.utils.process import arg_split
142 142 from traitlets import Bool, Enum, observe, Int
143 143
144 144 try:
145 145 import jedi
146 146 import jedi.api.helpers
147 147 JEDI_INSTALLED = True
148 148 except ImportError:
149 149 JEDI_INSTALLED = False
150 150 #-----------------------------------------------------------------------------
151 151 # Globals
152 152 #-----------------------------------------------------------------------------
153 153
154 154 # Public API
155 155 __all__ = ['Completer','IPCompleter']
156 156
157 157 if sys.platform == 'win32':
158 158 PROTECTABLES = ' '
159 159 else:
160 160 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
161 161
162 162
163 163 _deprecation_readline_sentinel = object()
164 164
165 165
166 166 class ProvisionalCompleterWarning(FutureWarning):
167 167 """
168 168 Exception raised by an experimental feature in this module.
169 169
170 170 Wrap code in :any:`provisionalcompleter` context manager if you
171 171 are certain you want to use an unstable feature.
172 172 """
173 173 pass
174 174
175 175 warnings.filterwarnings('error', category=ProvisionalCompleterWarning)
176 176
177 177 @contextmanager
178 178 def provisionalcompleter(action='ignore'):
179 179 """
180 180
181 181
182 182 This context manager has to be used in any place where unstable completer
183 183 behavior and API may be called.
184 184
185 185 >>> with provisionalcompleter():
186 186 ... completer.do_experimental_things() # works
187 187
188 188 >>> completer.do_experimental_things() # raises.
189 189
190 190 .. note:: Unstable
191 191
192 192 By using this context manager you agree that the APIs in use may change
193 193 without warning, and that you won't complain if they do so.
194 194
195 195 You also understand that if the API is not to your liking you should report
196 196 a bug upstream to explain your use case and help improve the API, and that you will lose
197 197 credibility if you complain after the API is made stable.
198 198
199 199 We'll be happy to get your feedback, feature requests and improvements on
200 200 any of the unstable APIs!
201 201 """
202 202 with warnings.catch_warnings():
203 203 warnings.filterwarnings(action, category=ProvisionalCompleterWarning)
204 204 yield
205 205
206 206
207 207 def has_open_quotes(s):
208 208 """Return whether a string has open quotes.
209 209
210 210 This simply counts whether the number of quote characters of either type in
211 211 the string is odd.
212 212
213 213 Returns
214 214 -------
215 215 If there is an open quote, the quote character is returned. Else, return
216 216 False.
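
    For example, a quick sketch of the behaviour::

        has_open_quotes("print('hello")    # -> "'"
        has_open_quotes("print('hello')")  # -> False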
217 217 """
218 218 # We check " first, then ', so complex cases with nested quotes will get
219 219 # the " to take precedence.
220 220 if s.count('"') % 2:
221 221 return '"'
222 222 elif s.count("'") % 2:
223 223 return "'"
224 224 else:
225 225 return False
226 226
227 227
228 228 def protect_filename(s, protectables=PROTECTABLES):
229 229 """Escape a string to protect certain characters."""
230 230 if set(s) & set(protectables):
231 231 if sys.platform == "win32":
232 232 return '"' + s + '"'
233 233 else:
234 234 return "".join(("\\" + c if c in protectables else c) for c in s)
235 235 else:
236 236 return s
237 237
238 238
239 239 def expand_user(path):
240 240 """Expand ``~``-style usernames in strings.
241 241
242 242 This is similar to :func:`os.path.expanduser`, but it computes and returns
243 243 extra information that will be useful if the input was being used in
244 244 computing completions, and you wish to return the completions with the
245 245 original '~' instead of its expanded value.
246 246
247 247 Parameters
248 248 ----------
249 249 path : str
250 250 String to be expanded. If no ~ is present, the output is the same as the
251 251 input.
252 252
253 253 Returns
254 254 -------
255 255 newpath : str
256 256 Result of ~ expansion in the input path.
257 257 tilde_expand : bool
258 258 Whether any expansion was performed or not.
259 259 tilde_val : str
260 260 The value that ~ was replaced with.
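
    For illustration, a sketch of the expected behaviour (assuming the current
    user's home directory is ``/home/user``)::

        expand_user('~/file.txt')   # -> ('/home/user/file.txt', True, '/home/user')
        expand_user('data.txt')     # -> ('data.txt', False, '')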
261 261 """
262 262 # Default values
263 263 tilde_expand = False
264 264 tilde_val = ''
265 265 newpath = path
266 266
267 267 if path.startswith('~'):
268 268 tilde_expand = True
269 269 rest = len(path)-1
270 270 newpath = os.path.expanduser(path)
271 271 if rest:
272 272 tilde_val = newpath[:-rest]
273 273 else:
274 274 tilde_val = newpath
275 275
276 276 return newpath, tilde_expand, tilde_val
277 277
278 278
279 279 def compress_user(path, tilde_expand, tilde_val):
280 280 """Does the opposite of expand_user, with its outputs.
281 281 """
282 282 if tilde_expand:
283 283 return path.replace(tilde_val, '~')
284 284 else:
285 285 return path
286 286
287 287
288 288 def completions_sorting_key(word):
289 289 """key for sorting completions
290 290
291 291 This does several things:
292 292
293 293 - Lowercase all completions, so they are sorted alphabetically with
294 294 upper and lower case words mingled
295 295 - Demote any completions starting with underscores to the end
296 296 - Insert any %magic and %%cellmagic completions in the alphabetical order
297 297 by their name
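
    For illustration, a small sketch of the resulting order::

        sorted(['_private', 'Zebra', 'apple', '%%timeit'], key=completions_sorting_key)
        # -> ['apple', '%%timeit', 'Zebra', '_private']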
298 298 """
299 299 # Case insensitive sort
300 300 word = word.lower()
301 301
302 302 prio1, prio2 = 0, 0
303 303
304 304 if word.startswith('__'):
305 305 prio1 = 2
306 306 elif word.startswith('_'):
307 307 prio1 = 1
308 308
309 309 if word.endswith('='):
310 310 prio1 = -1
311 311
312 312 if word.startswith('%%'):
313 313 # If there's another % in there, this is something else, so leave it alone
314 314 if not "%" in word[2:]:
315 315 word = word[2:]
316 316 prio2 = 2
317 317 elif word.startswith('%'):
318 318 if not "%" in word[1:]:
319 319 word = word[1:]
320 320 prio2 = 1
321 321
322 322 return prio1, word, prio2
323 323
324 324
325 325 class _FakeJediCompletion:
326 326 """
327 327 This is a workaround to communicate to the UI that Jedi has crashed and to
328 328 report a bug. Will be used only if :any:`IPCompleter.debug` is set to True.
329 329
330 330 Added in IPython 6.0 so should likely be removed for 7.0
331 331
332 332 """
333 333
334 334 def __init__(self, name):
335 335
336 336 self.name = name
337 337 self.complete = name
338 338 self.type = 'crashed'
339 339 self.name_with_symbols = name
340 340
341 341 def __repr__(self):
342 342 return '<Fake completion object jedi has crashed>'
343 343
344 344
345 345 class Completion:
346 346 """
347 347 Completion object used and returned by IPython completers.
348 348
349 349 .. warning:: Unstable
350 350
351 351 This class is unstable, the API may change without warning.
352 352 It will also raise unless used in a proper context manager.
353 353
354 354 This acts as a middle-ground :any:`Completion` object between the
355 355 :any:`jedi.api.classes.Completion` object and the Prompt Toolkit completion
356 356 object. While Jedi needs a lot of information about the evaluator and how the
357 357 code should be run/inspected, PromptToolkit (and other frontends) mostly
358 358 need user-facing information.
359 359
360 360 - Which range should be replaced by what.
361 361 - Some metadata (like the completion type), or meta information to be displayed to
362 362 the user.
363 363
364 364 For debugging purposes we can also store the origin of the completion (``jedi``,
365 365 ``IPython.python_matches``, ``IPython.magics_matches``...).
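
    As the class is provisional, constructing one by hand must happen inside a
    :any:`provisionalcompleter` block; a minimal sketch (values are illustrative)::

        with provisionalcompleter():
            c = Completion(start=0, end=5, text='hello', type='function', _origin='jedi')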
366 366 """
367 367
368 def __init__(self, start: int, end: int, text: str, *, type: str=None, _origin=''):
368 def __init__(self, start: int, end: int, text: str, *, type: str=None, _origin='') -> None:
369 369 warnings.warn("``Completion`` is a provisional API (as of IPython 6.0). "
370 370 "It may change without warnings. "
371 371 "Use in corresponding context manager.",
372 372 category=ProvisionalCompleterWarning, stacklevel=2)
373 373
374 374 self.start = start
375 375 self.end = end
376 376 self.text = text
377 377 self.type = type
378 378 self._origin = _origin
379 379
380 380 def __repr__(self):
381 381 return '<Completion start=%s end=%s text=%r type=%r>' % (self.start, self.end, self.text, self.type or '?')
382 382
383 383 def __eq__(self, other)->bool:
384 384 """
385 385 Equality and hash do not hash the type (as some completers may not be
386 386 able to infer the type), but are used to (partially) de-duplicate
387 387 completions.
388 388
389 389 Completely de-duplicating completions is a bit trickier than just
390 390 comparing, as it depends on surrounding text, which Completions are not
391 391 aware of.
392 392 """
393 393 return self.start == other.start and \
394 394 self.end == other.end and \
395 395 self.text == other.text
396 396
397 397 def __hash__(self):
398 398 return hash((self.start, self.end, self.text))
399 399
400 400
401 _IC = Iterator[Completion]
401 _IC = Iterable[Completion]
402 402
403 403
404 404 def _deduplicate_completions(text: str, completions: _IC)-> _IC:
405 405 """
406 406 Deduplicate a set of completions.
407 407
408 408 .. warning:: Unstable
409 409
410 410 This function is unstable, API may change without warning.
411 411
412 412 Parameters
413 413 ----------
414 414 text: str
415 415 text that should be completed.
416 416 completions: Iterator[Completion]
417 417 iterator over the completions to deduplicate
418 418
419 419
420 420 Completions coming from multiple sources may be different but end up having
421 421 the same effect when applied to ``text``. If this is the case, this will
422 422 consider the completions as equal and only emit the first one encountered.
423 423
424 424 Not folded into `completions()` yet for debugging purposes, and to detect when
425 425 the IPython completer does return things that Jedi does not, but it should be
426 426 at some point.
427 427 """
428 428 completions = list(completions)
429 429 if not completions:
430 430 return
431 431
432 432 new_start = min(c.start for c in completions)
433 433 new_end = max(c.end for c in completions)
434 434
435 435 seen = set()
436 436 for c in completions:
437 437 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
438 438 if new_text not in seen:
439 439 yield c
440 440 seen.add(new_text)
441 441
442 442
443 443 def rectify_completions(text: str, completions: _IC, *, _debug=False)->_IC:
444 444 """
445 445 Rectify a set of completions to all have the same ``start`` and ``end``
446 446
447 447 .. warning:: Unstable
448 448
449 449 This function is unstable, API may change without warning.
450 450 It will also raise unless used in a proper context manager.
451 451
452 452 Parameters
453 453 ----------
454 454 text: str
455 455 text that should be completed.
456 456 completions: Iterator[Completion]
457 457 iterator over the completions to rectify
458 458
459 459
460 460 :any:`jedi.api.classes.Completion` s returned by Jedi may not have the same start and end, though
461 461 the Jupyter Protocol requires them to behave like so. This will readjust
462 462 the completions to have the same ``start`` and ``end`` by padding both
463 463 extremities with surrounding text.
464 464
465 465 During stabilisation this should support a ``_debug`` option to log which
466 466 completions are returned by the IPython completer and not found in Jedi, in
467 467 order to make upstream bug reports.
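
    A minimal sketch of the effect (the completions below are made up for
    illustration, and must be created inside a :any:`provisionalcompleter` block)::

        with provisionalcompleter():
            cs = [Completion(0, 5, 'completion'), Completion(3, 5, 'plete')]
            rectified = list(rectify_completions('compl', cs))
            # both results now span (0, 5): 'completion' and 'complete'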
468 468 """
469 469 warnings.warn("`rectify_completions` is a provisional API (as of IPython 6.0). "
470 470 "It may change without warnings. "
471 471 "Use in corresponding context manager.",
472 472 category=ProvisionalCompleterWarning, stacklevel=2)
473 473
474 474 completions = list(completions)
475 475 if not completions:
476 476 return
477 477 starts = (c.start for c in completions)
478 478 ends = (c.end for c in completions)
479 479
480 480 new_start = min(starts)
481 481 new_end = max(ends)
482 482
483 483 seen_jedi = set()
484 484 seen_python_matches = set()
485 485 for c in completions:
486 486 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
487 487 if c._origin == 'jedi':
488 488 seen_jedi.add(new_text)
489 489 elif c._origin == 'IPCompleter.python_matches':
490 490 seen_python_matches.add(new_text)
491 491 yield Completion(new_start, new_end, new_text, type=c.type, _origin=c._origin)
492 492 diff = seen_python_matches.difference(seen_jedi)
493 493 if diff and _debug:
494 494 print('IPython.python matches have extras:', diff)
495 495
496 496
497 497 if sys.platform == 'win32':
498 498 DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
499 499 else:
500 500 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
501 501
502 502 GREEDY_DELIMS = ' =\r\n'
503 503
504 504
505 505 class CompletionSplitter(object):
506 506 """An object to split an input line in a manner similar to readline.
507 507
508 508 By having our own implementation, we can expose readline-like completion in
509 509 a uniform manner to all frontends. This object only needs to be given the
510 510 line of text to be split and the cursor position on said line, and it
511 511 returns the 'word' to be completed on at the cursor after splitting the
512 512 entire line.
513 513
514 514 What characters are used as splitting delimiters can be controlled by
515 515 setting the ``delims`` attribute (this is a property that internally
516 516 automatically builds the necessary regular expression)"""
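
    # For example, a small sketch of the behaviour (with the default delimiters,
    # '.' is not a delimiter, so dotted names survive the split):
    #
    #   CompletionSplitter().split_line('a = some_obj.attr')   # -> 'some_obj.attr'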
517 517
518 518 # Private interface
519 519
520 520 # A string of delimiter characters. The default value makes sense for
521 521 # IPython's most typical usage patterns.
522 522 _delims = DELIMS
523 523
524 524 # The expression (a normal string) to be compiled into a regular expression
525 525 # for actual splitting. We store it as an attribute mostly for ease of
526 526 # debugging, since this type of code can be so tricky to debug.
527 527 _delim_expr = None
528 528
529 529 # The regular expression that does the actual splitting
530 530 _delim_re = None
531 531
532 532 def __init__(self, delims=None):
533 533 delims = CompletionSplitter._delims if delims is None else delims
534 534 self.delims = delims
535 535
536 536 @property
537 537 def delims(self):
538 538 """Return the string of delimiter characters."""
539 539 return self._delims
540 540
541 541 @delims.setter
542 542 def delims(self, delims):
543 543 """Set the delimiters for line splitting."""
544 544 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
545 545 self._delim_re = re.compile(expr)
546 546 self._delims = delims
547 547 self._delim_expr = expr
548 548
549 549 def split_line(self, line, cursor_pos=None):
550 550 """Split a line of text with a cursor at the given position.
551 551 """
552 552 l = line if cursor_pos is None else line[:cursor_pos]
553 553 return self._delim_re.split(l)[-1]
554 554
555 555
556 556
557 557 class Completer(Configurable):
558 558
559 559 greedy = Bool(False,
560 560 help="""Activate greedy completion
561 561 PENDING DEPRECATION. This is now mostly taken care of with Jedi.
562 562
563 563 This will enable completion on elements of lists, results of function calls, etc.,
564 564 but can be unsafe because the code is actually evaluated on TAB.
565 565 """
566 566 ).tag(config=True)
567 567
568 568 use_jedi = Bool(default_value=JEDI_INSTALLED,
569 569 help="Experimental: Use Jedi to generate autocompletions. "
570 570 "Defaults to True if jedi is installed").tag(config=True)
571 571
572 572 jedi_compute_type_timeout = Int(default_value=400,
573 573 help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
574 574 Set to 0 to stop computing types. Non-zero values lower than 100ms may hurt
575 575 performance by preventing jedi from building its cache.
576 576 """).tag(config=True)
577 577
578 578 debug = Bool(default_value=False,
579 579 help='Enable debug for the Completer. Mostly print extra '
580 580 'information for experimental jedi integration.')\
581 581 .tag(config=True)
582 582
583 583 backslash_combining_completions = Bool(True,
584 584 help="Enable unicode completions, e.g. \\alpha<tab> . "
585 585 "Includes completion of latex commands, unicode names, and expanding "
586 586 "unicode characters back to latex commands.").tag(config=True)
587 587
588 588
589 589
590 590 def __init__(self, namespace=None, global_namespace=None, **kwargs):
591 591 """Create a new completer for the command line.
592 592
593 593 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
594 594
595 595 If unspecified, the default namespace where completions are performed
596 596 is __main__ (technically, __main__.__dict__). Namespaces should be
597 597 given as dictionaries.
598 598
599 599 An optional second namespace can be given. This allows the completer
600 600 to handle cases where both the local and global scopes need to be
601 601 distinguished.
602 602 """
603 603
604 604 # Don't bind to namespace quite yet, but flag whether the user wants a
605 605 # specific namespace or to use __main__.__dict__. This will allow us
606 606 # to bind to __main__.__dict__ at completion time, not now.
607 607 if namespace is None:
608 608 self.use_main_ns = True
609 609 else:
610 610 self.use_main_ns = False
611 611 self.namespace = namespace
612 612
613 613 # The global namespace, if given, can be bound directly
614 614 if global_namespace is None:
615 615 self.global_namespace = {}
616 616 else:
617 617 self.global_namespace = global_namespace
618 618
619 619 super(Completer, self).__init__(**kwargs)
620 620
621 621 def complete(self, text, state):
622 622 """Return the next possible completion for 'text'.
623 623
624 624 This is called successively with state == 0, 1, 2, ... until it
625 625 returns None. The completion should begin with 'text'.
626 626
627 627 """
628 628 if self.use_main_ns:
629 629 self.namespace = __main__.__dict__
630 630
631 631 if state == 0:
632 632 if "." in text:
633 633 self.matches = self.attr_matches(text)
634 634 else:
635 635 self.matches = self.global_matches(text)
636 636 try:
637 637 return self.matches[state]
638 638 except IndexError:
639 639 return None
640 640
641 641 def global_matches(self, text):
642 642 """Compute matches when text is a simple name.
643 643
644 644 Return a list of all keywords, built-in functions and names currently
645 645 defined in self.namespace or self.global_namespace that match.
646 646
647 647 """
648 648 matches = []
649 649 match_append = matches.append
650 650 n = len(text)
651 651 for lst in [keyword.kwlist,
652 652 builtin_mod.__dict__.keys(),
653 653 self.namespace.keys(),
654 654 self.global_namespace.keys()]:
655 655 for word in lst:
656 656 if word[:n] == text and word != "__builtins__":
657 657 match_append(word)
658 658
659 659 snake_case_re = re.compile(r"[^_]+(_[^_]+)+?\Z")
660 660 for lst in [self.namespace.keys(),
661 661 self.global_namespace.keys()]:
662 662 shortened = {"_".join([sub[0] for sub in word.split('_')]) : word
663 663 for word in lst if snake_case_re.match(word)}
664 664 for word in shortened.keys():
665 665 if word[:n] == text and word != "__builtins__":
666 666 match_append(shortened[word])
667 667 return matches
668 668
669 669 def attr_matches(self, text):
670 670 """Compute matches when text contains a dot.
671 671
672 672 Assuming the text is of the form NAME.NAME....[NAME], and is
673 673 evaluatable in self.namespace or self.global_namespace, it will be
674 674 evaluated and its attributes (as revealed by dir()) are used as
675 675 possible completions. (For class instances, class members are
676 676 also considered.)
677 677
678 678 WARNING: this can still invoke arbitrary C code, if an object
679 679 with a __getattr__ hook is evaluated.
680 680
681 681 """
682 682
683 683 # Another option, seems to work great. Catches things like ''.<tab>
684 684 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
685 685
686 686 if m:
687 687 expr, attr = m.group(1, 3)
688 688 elif self.greedy:
689 689 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
690 690 if not m2:
691 691 return []
692 692 expr, attr = m2.group(1,2)
693 693 else:
694 694 return []
695 695
696 696 try:
697 697 obj = eval(expr, self.namespace)
698 698 except:
699 699 try:
700 700 obj = eval(expr, self.global_namespace)
701 701 except:
702 702 return []
703 703
704 704 if self.limit_to__all__ and hasattr(obj, '__all__'):
705 705 words = get__all__entries(obj)
706 706 else:
707 707 words = dir2(obj)
708 708
709 709 try:
710 710 words = generics.complete_object(obj, words)
711 711 except TryNext:
712 712 pass
713 713 except AssertionError:
714 714 raise
715 715 except Exception:
716 716 # Silence errors from completion function
717 717 #raise # dbg
718 718 pass
719 719 # Build match list to return
720 720 n = len(attr)
721 721 return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr ]
722 722
723 723
724 724 def get__all__entries(obj):
725 725 """returns the strings in the __all__ attribute"""
726 726 try:
727 727 words = getattr(obj, '__all__')
728 728 except:
729 729 return []
730 730
731 731 return [w for w in words if isinstance(w, str)]
732 732
733 733
734 734 def match_dict_keys(keys: List[str], prefix: str, delims: str):
735 735 """Used by dict_key_matches, matching the prefix to a list of keys
736 736
737 737 Parameters
738 738 ==========
739 keys:
740 list of keys in dictionary currently being completed.
741 prefix:
742 Part of the text already typed by the user. e.g. `mydict[b'fo`
743 delims:
744 String of delimiters to consider when finding the current key.
739 keys:
740 list of keys in dictionary currently being completed.
741 prefix:
742 Part of the text already typed by the user. e.g. `mydict[b'fo`
743 delims:
744 String of delimiters to consider when finding the current key.
745 745
746 746 Returns
747 747 =======
748 748
749 749 A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
750 750 ``quote`` being the quote that needs to be used to close the current string,
751 751 ``token_start`` the position where the replacement should start occurring, and
752 752 ``matched`` a list of replacements/completions.
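
    For example, a sketch of the expected behaviour, using the module-level
    ``DELIMS`` as delimiters::

        match_dict_keys(['hello', 'world'], "'h", DELIMS)
        # -> ("'", 1, ['hello'])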
753 753
754 754 """
755 755 if not prefix:
756 756 return None, 0, [repr(k) for k in keys
757 757 if isinstance(k, (str, bytes))]
758 758 quote_match = re.search('["\']', prefix)
759 759 quote = quote_match.group()
760 760 try:
761 761 prefix_str = eval(prefix + quote, {})
762 762 except Exception:
763 763 return None, 0, []
764 764
765 765 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
766 766 token_match = re.search(pattern, prefix, re.UNICODE)
767 767 token_start = token_match.start()
768 768 token_prefix = token_match.group()
769 769
770 770 matched = []
771 771 for key in keys:
772 772 try:
773 773 if not key.startswith(prefix_str):
774 774 continue
775 775 except (AttributeError, TypeError, UnicodeError):
776 776 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
777 777 continue
778 778
779 779 # reformat remainder of key to begin with prefix
780 780 rem = key[len(prefix_str):]
781 781 # force repr wrapped in '
782 782 rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
783 783 if rem_repr.startswith('u') and prefix[0] not in 'uU':
784 784 # Found key is unicode, but prefix is Py2 string.
785 785 # Therefore attempt to interpret key as string.
786 786 try:
787 787 rem_repr = repr(rem.encode('ascii') + '"')
788 788 except UnicodeEncodeError:
789 789 continue
790 790
791 791 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
792 792 if quote == '"':
793 793 # The entered prefix is quoted with ",
794 794 # but the match is quoted with '.
795 795 # A contained " hence needs escaping for comparison:
796 796 rem_repr = rem_repr.replace('"', '\\"')
797 797
798 798 # then reinsert prefix from start of token
799 799 matched.append('%s%s' % (token_prefix, rem_repr))
800 800 return quote, token_start, matched
801 801
802 802
803 def cursor_to_position(text:int, line:int, column:int)->int:
803 def cursor_to_position(text:str, line:int, column:int)->int:
804 804 """
805 805
806 806 Convert the (line,column) position of the cursor in text to an offset in a
807 807 string.
808 808
809 809 Parameters
810 810 ----------
811 811
812 812 text : str
813 813 The text in which to calculate the cursor offset
814 814 line : int
815 815 Line of the cursor; 0-indexed
816 816 column : int
817 817 Column of the cursor 0-indexed
818 818
819 819 Return
820 820 ------
821 821 Position of the cursor in ``text``, 0-indexed.
822 822
823 823 See Also
824 824 --------
825 825 position_to_cursor: reciprocal of this function
826 826
827 827 """
828 828 lines = text.split('\n')
829 829 assert line <= len(lines), '{} <= {}'.format(str(line), str(len(lines)))
830 830
831 831 return sum(len(l) + 1 for l in lines[:line]) + column
832 832
833 def position_to_cursor(text:str, offset:int)->(int, int):
833 def position_to_cursor(text:str, offset:int)->Tuple[int, int]:
834 834 """
835 835 Convert the position of the cursor in text (0 indexed) to a line
836 836 number(0-indexed) and a column number (0-indexed) pair
837 837
838 838 Position should be a valid position in ``text``.
839 839
840 840 Parameters
841 841 ----------
842 842
843 843 text : str
844 844 The text in which to calculate the cursor offset
845 845 offset : int
846 846 Position of the cursor in ``text``, 0-indexed.
847 847
848 848 Return
849 849 ------
850 850 (line, column) : (int, int)
851 851 Line of the cursor; 0-indexed, column of the cursor 0-indexed
852 852
853 853
854 854 See Also
855 855 --------
856 856 cursor_to_position : reciprocal of this function
857 857
858 858
859 859 """
860 860
861 861 assert 0 < offset <= len(text) , "0 < %s <= %s" % (offset , len(text))
862 862
863 863 before = text[:offset]
864 864 blines = before.split('\n') # ! splitlines trims a trailing \n, hence split
865 865 line = before.count('\n')
866 866 col = len(blines[-1])
867 867 return line, col
868 868
869 869
870 870 def _safe_isinstance(obj, module, class_name):
871 871 """Checks if obj is an instance of module.class_name if loaded
872 872 """
873 873 return (module in sys.modules and
874 874 isinstance(obj, getattr(import_module(module), class_name)))
875 875
876 876
877 877 def back_unicode_name_matches(text):
878 878 u"""Match unicode characters back to unicode name
879 879
880 880 This does ``β˜ƒ`` -> ``\\snowman``
881 881
882 882 Note that snowman is not a valid python3 combining character but will be expanded.
883 883 Though it will not be recombined back into the snowman character by the completion machinery.
884 884
885 885 Nor will this back-complete standard sequences like \\n, \\b ...
886 886
887 887 Used on Python 3 only.
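
    For example, a quick sketch (note the returned name is uppercase)::

        back_unicode_name_matches('\\β˜ƒ')   # -> ('\\β˜ƒ', ['\\SNOWMAN'])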
888 888 """
889 889 if len(text)<2:
890 890 return u'', ()
891 891 maybe_slash = text[-2]
892 892 if maybe_slash != '\\':
893 893 return u'', ()
894 894
895 895 char = text[-1]
896 896 # no expand on quote for completion in strings.
897 897 # nor backcomplete standard ascii keys
898 898 if char in string.ascii_letters or char in ['"',"'"]:
899 899 return u'', ()
900 900 try :
901 901 unic = unicodedata.name(char)
902 902 return '\\'+char,['\\'+unic]
903 903 except KeyError:
904 904 pass
905 905 return u'', ()
906 906
907 907 def back_latex_name_matches(text:str):
908 908 """Match latex characters back to unicode name
909 909
910 910 This does ``\\β„΅`` -> ``\\aleph``
911 911
912 912 Used on Python 3 only.
913 913 """
914 914 if len(text)<2:
915 915 return u'', ()
916 916 maybe_slash = text[-2]
917 917 if maybe_slash != '\\':
918 918 return u'', ()
919 919
920 920
921 921 char = text[-1]
922 922 # no expand on quote for completion in strings.
923 923 # nor backcomplete standard ascii keys
924 924 if char in string.ascii_letters or char in ['"',"'"]:
925 925 return u'', ()
926 926 try :
927 927 latex = reverse_latex_symbol[char]
928 928 # '\\' replace the \ as well
929 929 return '\\'+char,[latex]
930 930 except KeyError:
931 931 pass
932 932 return u'', ()
933 933
934 934
935 935 class IPCompleter(Completer):
936 936 """Extension of the completer class with IPython-specific features"""
937 937
938 938 @observe('greedy')
939 939 def _greedy_changed(self, change):
940 940 """update the splitter and readline delims when greedy is changed"""
941 941 if change['new']:
942 942 self.splitter.delims = GREEDY_DELIMS
943 943 else:
944 944 self.splitter.delims = DELIMS
945 945
946 946 merge_completions = Bool(True,
947 947 help="""Whether to merge completion results into a single list
948 948
949 949 If False, only the completion results from the first non-empty
950 950 completer will be returned.
951 951 """
952 952 ).tag(config=True)
953 953 omit__names = Enum((0,1,2), default_value=2,
954 954 help="""Instruct the completer to omit private method names
955 955
956 956 Specifically, when completing on ``object.<tab>``.
957 957
958 958 When 2 [default]: all names that start with '_' will be excluded.
959 959
960 960 When 1: all 'magic' names (``__foo__``) will be excluded.
961 961
962 962 When 0: nothing will be excluded.
963 963 """
964 964 ).tag(config=True)
965 965 limit_to__all__ = Bool(False,
966 966 help="""
967 967 DEPRECATED as of version 5.0.
968 968
969 969 Instruct the completer to use __all__ for the completion
970 970
971 971 Specifically, when completing on ``object.<tab>``.
972 972
973 973 When True: only those names in obj.__all__ will be included.
974 974
975 975 When False [default]: the __all__ attribute is ignored
976 976 """,
977 977 ).tag(config=True)
978 978
979 979 @observe('limit_to__all__')
980 980 def _limit_to_all_changed(self, change):
981 981 warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
982 982 'value has been deprecated since IPython 5.0, will be made to have '
983 983 'no effect and then removed in a future version of IPython.',
984 984 UserWarning)
985 985
986 986 def __init__(self, shell=None, namespace=None, global_namespace=None,
987 987 use_readline=_deprecation_readline_sentinel, config=None, **kwargs):
988 988 """IPCompleter() -> completer
989 989
990 990 Return a completer object.
991 991
992 992 Parameters
993 993 ----------
994 994
995 995 shell
996 996 a pointer to the ipython shell itself. This is needed
997 997 because this completer knows about magic functions, and those can
998 998 only be accessed via the ipython instance.
999 999
1000 1000 namespace : dict, optional
1001 1001 an optional dict where completions are performed.
1002 1002
1003 1003 global_namespace : dict, optional
1004 1004 secondary optional dict for completions, to
1005 1005 handle cases (such as IPython embedded inside functions) where
1006 1006 both Python scopes are visible.
1007 1007
1008 1008 use_readline : bool, optional
1009 1009 DEPRECATED, ignored since IPython 6.0, will have no effect
1010 1010 """
1011 1011
1012 1012 self.magic_escape = ESC_MAGIC
1013 1013 self.splitter = CompletionSplitter()
1014 1014
1015 1015 if use_readline is not _deprecation_readline_sentinel:
1016 1016 warnings.warn('The `use_readline` parameter is deprecated and ignored since IPython 6.0.',
1017 1017 DeprecationWarning, stacklevel=2)
1018 1018
1019 1019 # _greedy_changed() depends on splitter and readline being defined:
1020 1020 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
1021 1021 config=config, **kwargs)
1022 1022
1023 1023 # List where completion matches will be stored
1024 1024 self.matches = []
1025 1025 self.shell = shell
1026 1026 # Regexp to split filenames with spaces in them
1027 1027 self.space_name_re = re.compile(r'([^\\] )')
1028 1028 # Hold a local ref. to glob.glob for speed
1029 1029 self.glob = glob.glob
1030 1030
1031 1031 # Determine if we are running on 'dumb' terminals, like (X)Emacs
1032 1032 # buffers, to avoid completion problems.
1033 1033 term = os.environ.get('TERM','xterm')
1034 1034 self.dumb_terminal = term in ['dumb','emacs']
1035 1035
1036 1036 # Special handling of backslashes needed in win32 platforms
1037 1037 if sys.platform == "win32":
1038 1038 self.clean_glob = self._clean_glob_win32
1039 1039 else:
1040 1040 self.clean_glob = self._clean_glob
1041 1041
1042 1042 #regexp to parse docstring for function signature
1043 1043 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1044 1044 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1045 1045 #use this if positional argument name is also needed
1046 1046 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
1047 1047
1048 1048 # All active matcher routines for completion
1049 1049 self.matchers = [
1050 1050 self.python_matches,
1051 1051 self.file_matches,
1052 1052 self.magic_config_matches,
1053 1053 self.magic_matches,
1054 1054 self.python_func_kw_matches,
1055 1055 self.dict_key_matches,
1056 1056 ]
1057 1057
1058 1058 # This is set externally by InteractiveShell
1059 1059 self.custom_completers = None
1060 1060
1061 1061 def all_completions(self, text):
1062 1062 """
1063 1063 Wrapper around the complete method for the benefit of emacs.
1064 1064 """
1065 1065 return self.complete(text)[1]
1066 1066
1067 1067 def _clean_glob(self, text):
1068 1068 return self.glob("%s*" % text)
1069 1069
1070 1070 def _clean_glob_win32(self,text):
1071 1071 return [f.replace("\\","/")
1072 1072 for f in self.glob("%s*" % text)]
1073 1073
1074 1074 def file_matches(self, text):
1075 1075 """Match filenames, expanding ~USER type strings.
1076 1076
1077 1077 Most of the seemingly convoluted logic in this completer is an
1078 1078 attempt to handle filenames with spaces in them. And yet it's not
1079 1079 quite perfect, because Python's readline doesn't expose all of the
1080 1080 GNU readline details needed for this to be done correctly.
1081 1081
1082 1082 For a filename with a space in it, the printed completions will be
1083 1083 only the parts after what's already been typed (instead of the
1084 1084 full completions, as is normally done). I don't think with the
1085 1085 current (as of Python 2.3) Python readline it's possible to do
1086 1086 better."""
1087 1087
1088 1088 # chars that require escaping with backslash - i.e. chars
1089 1089 # that readline treats incorrectly as delimiters, but we
1090 1090 # don't want to treat as delimiters in filename matching
1091 1091 # when escaped with backslash
1092 1092 if text.startswith('!'):
1093 1093 text = text[1:]
1094 1094 text_prefix = u'!'
1095 1095 else:
1096 1096 text_prefix = u''
1097 1097
1098 1098 text_until_cursor = self.text_until_cursor
1099 1099 # track strings with open quotes
1100 1100 open_quotes = has_open_quotes(text_until_cursor)
1101 1101
1102 1102 if '(' in text_until_cursor or '[' in text_until_cursor:
1103 1103 lsplit = text
1104 1104 else:
1105 1105 try:
1106 1106 # arg_split ~ shlex.split, but with unicode bugs fixed by us
1107 1107 lsplit = arg_split(text_until_cursor)[-1]
1108 1108 except ValueError:
1109 1109 # typically an unmatched ", or backslash without escaped char.
1110 1110 if open_quotes:
1111 1111 lsplit = text_until_cursor.split(open_quotes)[-1]
1112 1112 else:
1113 1113 return []
1114 1114 except IndexError:
1115 1115 # tab pressed on empty line
1116 1116 lsplit = ""
1117 1117
1118 1118 if not open_quotes and lsplit != protect_filename(lsplit):
1119 1119 # if protectables are found, do matching on the whole escaped name
1120 1120 has_protectables = True
1121 1121 text0,text = text,lsplit
1122 1122 else:
1123 1123 has_protectables = False
1124 1124 text = os.path.expanduser(text)
1125 1125
1126 1126 if text == "":
1127 1127 return [text_prefix + protect_filename(f) for f in self.glob("*")]
1128 1128
1129 1129 # Compute the matches from the filesystem
1130 1130 if sys.platform == 'win32':
1131 1131 m0 = self.clean_glob(text)
1132 1132 else:
1133 1133 m0 = self.clean_glob(text.replace('\\', ''))
1134 1134
1135 1135 if has_protectables:
1136 1136 # If we had protectables, we need to revert our changes to the
1137 1137 # beginning of filename so that we don't double-write the part
1138 1138 # of the filename we have so far
1139 1139 len_lsplit = len(lsplit)
1140 1140 matches = [text_prefix + text0 +
1141 1141 protect_filename(f[len_lsplit:]) for f in m0]
1142 1142 else:
1143 1143 if open_quotes:
1144 1144 # if we have a string with an open quote, we don't need to
1145 1145 # protect the names beyond the quote (and we _shouldn't_, as
1146 1146 # it would cause bugs when the filesystem call is made).
1147 1147 matches = m0 if sys.platform == "win32" else\
1148 1148 [protect_filename(f, open_quotes) for f in m0]
1149 1149 else:
1150 1150 matches = [text_prefix +
1151 1151 protect_filename(f) for f in m0]
1152 1152
1153 1153 # Mark directories in input list by appending '/' to their names.
1154 1154 return [x+'/' if os.path.isdir(x) else x for x in matches]
1155 1155
1156 1156 def magic_matches(self, text):
1157 1157 """Match magics"""
1158 1158 # Get all shell magics now rather than statically, so magics loaded at
1159 1159 # runtime show up too.
1160 1160 lsm = self.shell.magics_manager.lsmagic()
1161 1161 line_magics = lsm['line']
1162 1162 cell_magics = lsm['cell']
1163 1163 pre = self.magic_escape
1164 1164 pre2 = pre+pre
1165 1165
1166 1166 # Completion logic:
1167 1167 # - user gives %%: only do cell magics
1168 1168 # - user gives %: do both line and cell magics
1169 1169 # - no prefix: do both
1170 1170 # In other words, line magics are skipped if the user gives %% explicitly
1171 1171 #
1172 1172 # We also exclude magics that match any currently visible names:
1173 1173 # https://github.com/ipython/ipython/issues/4877
1174 1174 bare_text = text.lstrip(pre)
1175 1175 global_matches = self.global_matches(bare_text)
1176 1176 matches = lambda magic: magic.startswith(bare_text) \
1177 1177 and magic not in global_matches
1178 1178 comp = [ pre2+m for m in cell_magics if matches(m)]
1179 1179 if not text.startswith(pre2):
1180 1180 comp += [ pre+m for m in line_magics if matches(m)]
1181 1181
1182 1182 return comp
1183 1183
1184 1184 def magic_config_matches(self, text):
1185 1185 """ Match class names and attributes for %config magic """
1186 1186 # use line buffer instead of text (which is a word)
1187 1187 texts = self.line_buffer.strip().split()
1188 1188
1189 1189 if len(texts) > 0 and \
1190 1190 ('config'.startswith(texts[0]) or '%config'.startswith(texts[0])):
1191 1191 # get all configuration classes
1192 1192 classes = sorted(set([ c for c in self.shell.configurables
1193 1193 if c.__class__.class_traits(config=True)
1194 1194 ]), key=lambda x: x.__class__.__name__)
1195 1195 classnames = [ c.__class__.__name__ for c in classes ]
1196 1196
1197 1197 # return all classnames if config or %config is given
1198 1198 if len(texts) == 1:
1199 1199 return classnames
1200 1200
1201 1201 # match classname
1202 1202 classname_texts = texts[1].split('.')
1203 1203 classname = classname_texts[0]
1204 1204 classname_matches = [ c for c in classnames
1205 1205 if c.startswith(classname) ]
1206 1206
1207 1207 # return matched classes or the matched class with attributes
1208 1208 if texts[1].find('.') < 0:
1209 1209 return classname_matches
1210 1210 elif len(classname_matches) == 1 and \
1211 1211 classname_matches[0] == classname:
1212 1212 cls = classes[classnames.index(classname)].__class__
1213 1213 help = cls.class_get_help()
1214 1214 # strip leading '--' from cl-args:
1215 1215 help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
1216 1216 return [ attr.split('=')[0]
1217 1217 for attr in help.strip().splitlines()
1218 1218 if attr.startswith(texts[1]) ]
1219 1219 return []
1220 1220
1221 1221 def _jedi_matches(self, cursor_column:int, cursor_line:int, text:str):
1222 1222 """
1223 1223
1224 1224 Return a list of :any:`jedi.api.Completions` objects from a ``text`` and
1225 1225 cursor position.
1226 1226
1227 1227 Parameters
1228 1228 ----------
1229 1229 cursor_column : int
1230 1230 column position of the cursor in ``text``, 0-indexed.
1231 1231 cursor_line : int
1232 1232 line position of the cursor in ``text``, 0-indexed
1233 1233 text : str
1234 1234 text to complete
1235 1235
1236 1236 Debugging
1237 1237 ---------
1238 1238
1239 1239 If ``IPCompleter.debug`` is ``True``, this may return a :any:`_FakeJediCompletion`
1240 1240 object containing a string with the Jedi debug information attached.
1241 1241 """
1242 1242 namespaces = [self.namespace]
1243 1243 if self.global_namespace is not None:
1244 1244 namespaces.append(self.global_namespace)
1245 1245
1246 1246 completion_filter = lambda x:x
1247 1247 # cursor_pos is an int, jedi wants line and column
1248 1248 offset = cursor_to_position(text, cursor_line, cursor_column)
1249 1249 # filter output if we are completing for object members
1250 1250 if offset:
1251 1251 pre = text[offset-1]
1252 1252 if pre == '.':
1253 1253 if self.omit__names == 2:
1254 1254 completion_filter = lambda c:not c.name.startswith('_')
1255 1255 elif self.omit__names == 1:
1256 1256 completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
1257 1257 elif self.omit__names == 0:
1258 1258 completion_filter = lambda x:x
1259 1259 else:
1260 1260 raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
1261 1261
1262 1262 interpreter = jedi.Interpreter(
1263 1263 text, namespaces, column=cursor_column, line=cursor_line + 1)
1264 1264
1265 1265 try_jedi = False
1266 1266
1267 1267 try:
1268 1268 # should we check the type of the node is Error ?
1269 1269 from jedi.parser.tree import ErrorLeaf
1270 1270 next_to_last_tree = interpreter._get_module().tree_node.children[-2]
1271 1271 completing_string = False
1272 1272 if isinstance(next_to_last_tree, ErrorLeaf):
1273 1273 completing_string = interpreter._get_module().tree_node.children[-2].value[0] in {'"', "'"}
1274 1274 # if we are in a string jedi is likely not the right candidate for
1275 1275 # now. Skip it.
1276 1276 try_jedi = not completing_string
1277 1277 except Exception as e:
1278 1278 # many things can go wrong; we are using a private API, just don't crash.
1279 1279 if self.debug:
1280 1280 print("Error detecting if completing a non-finished string :", e, '|')
1281 1281
1282 1282 if not try_jedi:
1283 1283 return []
1284 1284 try:
1285 1285 return filter(completion_filter, interpreter.completions())
1286 1286 except Exception as e:
1287 1287 if self.debug:
1288 1288 return [_FakeJediCompletion('Oops Jedi has crashed, please report a bug with the following:\n"""\n%s\n"""' % (e))]
1289 1289 else:
1290 1290 return []
1291 1291
1292 1292 def python_matches(self, text):
1293 1293 """Match attributes or global python names"""
1294 1294 if "." in text:
1295 1295 try:
1296 1296 matches = self.attr_matches(text)
1297 1297 if text.endswith('.') and self.omit__names:
1298 1298 if self.omit__names == 1:
1299 1299 # true if txt is _not_ a __ name, false otherwise:
1300 1300 no__name = (lambda txt:
1301 1301 re.match(r'.*\.__.*?__',txt) is None)
1302 1302 else:
1303 1303 # true if txt is _not_ a _ name, false otherwise:
1304 1304 no__name = (lambda txt:
1305 1305 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
1306 1306 matches = filter(no__name, matches)
1307 1307 except NameError:
1308 1308 # catches <undefined attributes>.<tab>
1309 1309 matches = []
1310 1310 else:
1311 1311 matches = self.global_matches(text)
1312 1312 return matches
1313 1313
1314 1314 def _default_arguments_from_docstring(self, doc):
1315 1315 """Parse the first line of docstring for call signature.
1316 1316
1317 1317 Docstring should be of the form 'min(iterable[, key=func])\n'.
1318 1318 It can also parse cython docstring of the form
1319 1319 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
1320 1320 """
1321 1321 if doc is None:
1322 1322 return []
1323 1323
1324 1324 # care only about the first line
1325 1325 line = doc.lstrip().splitlines()[0]
1326 1326
1327 1327 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1328 1328 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
1329 1329 sig = self.docstring_sig_re.search(line)
1330 1330 if sig is None:
1331 1331 return []
1332 1332 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
1333 1333 sig = sig.groups()[0].split(',')
1334 1334 ret = []
1335 1335 for s in sig:
1336 1336 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1337 1337 ret += self.docstring_kwd_re.findall(s)
1338 1338 return ret
1339 1339
1340 1340 def _default_arguments(self, obj):
1341 1341 """Return the list of default arguments of obj if it is callable,
1342 1342 or empty list otherwise."""
1343 1343 call_obj = obj
1344 1344 ret = []
1345 1345 if inspect.isbuiltin(obj):
1346 1346 pass
1347 1347 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
1348 1348 if inspect.isclass(obj):
1349 1349 # for cython embedsignature=True the constructor docstring
1350 1350 #belongs to the object itself not __init__
1351 1351 ret += self._default_arguments_from_docstring(
1352 1352 getattr(obj, '__doc__', ''))
1353 1353 # for classes, check for __init__,__new__
1354 1354 call_obj = (getattr(obj, '__init__', None) or
1355 1355 getattr(obj, '__new__', None))
1356 1356 # for all others, check if they are __call__able
1357 1357 elif hasattr(obj, '__call__'):
1358 1358 call_obj = obj.__call__
1359 1359 ret += self._default_arguments_from_docstring(
1360 1360 getattr(call_obj, '__doc__', ''))
1361 1361
1362 1362 _keeps = (inspect.Parameter.KEYWORD_ONLY,
1363 1363 inspect.Parameter.POSITIONAL_OR_KEYWORD)
1364 1364
1365 1365 try:
1366 1366 sig = inspect.signature(call_obj)
1367 1367 ret.extend(k for k, v in sig.parameters.items() if
1368 1368 v.kind in _keeps)
1369 1369 except ValueError:
1370 1370 pass
1371 1371
1372 1372 return list(set(ret))
1373 1373
1374 1374 def python_func_kw_matches(self,text):
1375 1375 """Match named parameters (kwargs) of the last open function"""
1376 1376
1377 1377 if "." in text: # a parameter cannot be dotted
1378 1378 return []
1379 1379 try: regexp = self.__funcParamsRegex
1380 1380 except AttributeError:
1381 1381 regexp = self.__funcParamsRegex = re.compile(r'''
1382 1382 '.*?(?<!\\)' | # single quoted strings or
1383 1383 ".*?(?<!\\)" | # double quoted strings or
1384 1384 \w+ | # identifier
1385 1385 \S # other characters
1386 1386 ''', re.VERBOSE | re.DOTALL)
1387 1387 # 1. find the nearest identifier that comes before an unclosed
1388 1388 # parenthesis before the cursor
1389 1389 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
1390 1390 tokens = regexp.findall(self.text_until_cursor)
1391 1391 iterTokens = reversed(tokens); openPar = 0
1392 1392
1393 1393 for token in iterTokens:
1394 1394 if token == ')':
1395 1395 openPar -= 1
1396 1396 elif token == '(':
1397 1397 openPar += 1
1398 1398 if openPar > 0:
1399 1399 # found the last unclosed parenthesis
1400 1400 break
1401 1401 else:
1402 1402 return []
1403 1403 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
1404 1404 ids = []
1405 1405 isId = re.compile(r'\w+$').match
1406 1406
1407 1407 while True:
1408 1408 try:
1409 1409 ids.append(next(iterTokens))
1410 1410 if not isId(ids[-1]):
1411 1411 ids.pop(); break
1412 1412 if not next(iterTokens) == '.':
1413 1413 break
1414 1414 except StopIteration:
1415 1415 break
1416 1416
1417 1417 # Find all named arguments already assigned to, so as to avoid suggesting
1418 1418 # them again
1419 1419 usedNamedArgs = set()
1420 1420 par_level = -1
1421 1421 for token, next_token in zip(tokens, tokens[1:]):
1422 1422 if token == '(':
1423 1423 par_level += 1
1424 1424 elif token == ')':
1425 1425 par_level -= 1
1426 1426
1427 1427 if par_level != 0:
1428 1428 continue
1429 1429
1430 1430 if next_token != '=':
1431 1431 continue
1432 1432
1433 1433 usedNamedArgs.add(token)
1434 1434
1435 1435 # lookup the candidate callable matches either using global_matches
1436 1436 # or attr_matches for dotted names
1437 1437 if len(ids) == 1:
1438 1438 callableMatches = self.global_matches(ids[0])
1439 1439 else:
1440 1440 callableMatches = self.attr_matches('.'.join(ids[::-1]))
1441 1441 argMatches = []
1442 1442 for callableMatch in callableMatches:
1443 1443 try:
1444 1444 namedArgs = self._default_arguments(eval(callableMatch,
1445 1445 self.namespace))
1446 1446 except:
1447 1447 continue
1448 1448
1449 1449 # Remove used named arguments from the list, no need to show twice
1450 1450 for namedArg in set(namedArgs) - usedNamedArgs:
1451 1451 if namedArg.startswith(text):
1452 1452 argMatches.append(u"%s=" %namedArg)
1453 1453 return argMatches
1454 1454
1455 1455 def dict_key_matches(self, text):
1456 1456 "Match string keys in a dictionary, after e.g. 'foo[' "
1457 1457 def get_keys(obj):
1458 1458 # Objects can define their own completions by defining an
1459 1459 # _ipython_key_completions_() method.
1460 1460 method = get_real_method(obj, '_ipython_key_completions_')
1461 1461 if method is not None:
1462 1462 return method()
1463 1463
1464 1464 # Special case some common in-memory dict-like types
1465 1465 if isinstance(obj, dict) or\
1466 1466 _safe_isinstance(obj, 'pandas', 'DataFrame'):
1467 1467 try:
1468 1468 return list(obj.keys())
1469 1469 except Exception:
1470 1470 return []
1471 1471 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
1472 1472 _safe_isinstance(obj, 'numpy', 'void'):
1473 1473 return obj.dtype.names or []
1474 1474 return []
1475 1475
1476 1476 try:
1477 1477 regexps = self.__dict_key_regexps
1478 1478 except AttributeError:
1479 1479 dict_key_re_fmt = r'''(?x)
1480 1480 ( # match dict-referring expression wrt greedy setting
1481 1481 %s
1482 1482 )
1483 1483 \[ # open bracket
1484 1484 \s* # and optional whitespace
1485 1485 ([uUbB]? # string prefix (r not handled)
1486 1486 (?: # unclosed string
1487 1487 '(?:[^']|(?<!\\)\\')*
1488 1488 |
1489 1489 "(?:[^"]|(?<!\\)\\")*
1490 1490 )
1491 1491 )?
1492 1492 $
1493 1493 '''
1494 1494 regexps = self.__dict_key_regexps = {
1495 1495 False: re.compile(dict_key_re_fmt % '''
1496 1496 # identifiers separated by .
1497 1497 (?!\d)\w+
1498 1498 (?:\.(?!\d)\w+)*
1499 1499 '''),
1500 1500 True: re.compile(dict_key_re_fmt % '''
1501 1501 .+
1502 1502 ''')
1503 1503 }
1504 1504
1505 1505 match = regexps[self.greedy].search(self.text_until_cursor)
1506 1506 if match is None:
1507 1507 return []
1508 1508
1509 1509 expr, prefix = match.groups()
1510 1510 try:
1511 1511 obj = eval(expr, self.namespace)
1512 1512 except Exception:
1513 1513 try:
1514 1514 obj = eval(expr, self.global_namespace)
1515 1515 except Exception:
1516 1516 return []
1517 1517
1518 1518 keys = get_keys(obj)
1519 1519 if not keys:
1520 1520 return keys
1521 1521 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims)
1522 1522 if not matches:
1523 1523 return matches
1524 1524
1525 1525 # get the cursor position of
1526 1526 # - the text being completed
1527 1527 # - the start of the key text
1528 1528 # - the start of the completion
1529 1529 text_start = len(self.text_until_cursor) - len(text)
1530 1530 if prefix:
1531 1531 key_start = match.start(2)
1532 1532 completion_start = key_start + token_offset
1533 1533 else:
1534 1534 key_start = completion_start = match.end()
1535 1535
1536 1536 # grab the leading prefix, to make sure all completions start with `text`
1537 1537 if text_start > key_start:
1538 1538 leading = ''
1539 1539 else:
1540 1540 leading = text[text_start:completion_start]
1541 1541
1542 1542 # the index of the `[` character
1543 1543 bracket_idx = match.end(1)
1544 1544
1545 1545 # append closing quote and bracket as appropriate
1546 1546 # this is *not* appropriate if the opening quote or bracket is outside
1547 1547 # the text given to this method
1548 1548 suf = ''
1549 1549 continuation = self.line_buffer[len(self.text_until_cursor):]
1550 1550 if key_start > text_start and closing_quote:
1551 1551 # quotes were opened inside text, maybe close them
1552 1552 if continuation.startswith(closing_quote):
1553 1553 continuation = continuation[len(closing_quote):]
1554 1554 else:
1555 1555 suf += closing_quote
1556 1556 if bracket_idx > text_start:
1557 1557 # brackets were opened inside text, maybe close them
1558 1558 if not continuation.startswith(']'):
1559 1559 suf += ']'
1560 1560
1561 1561 return [leading + k + suf for k in matches]
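    # A minimal, hedged sketch (the class and variable names below are
    # illustrative, not part of this module): any object can opt into this
    # key completion by exposing ``_ipython_key_completions_``, which
    # ``get_keys`` above checks first.
    #
    #     class LazyStore:
    #         def __init__(self, data):
    #             self._data = dict(data)
    #         def __getitem__(self, key):
    #             return self._data[key]
    #         def _ipython_key_completions_(self):
    #             # keys offered after typing ``store[``
    #             return list(self._data)
    #
    #     store = LazyStore({'alpha': 1, 'beta': 2})
    #     # typing ``store['a`` and pressing <tab> would offer 'alpha'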
1562 1562
1563 1563 def unicode_name_matches(self, text):
1564 1564         u"""Match Latex-like syntax for unicode characters based
1565 1565         on the name of the character.
1566 1566
1567 1567         This does ``\\GREEK SMALL LETTER ETA`` -> ``η``
1568 1568
1569 1569         Works only on valid Python 3 identifiers, or on combining characters that
1570 1570         will combine to form a valid identifier.
1571 1571
1572 1572 Used on Python 3 only.
1573 1573 """
1574 1574 slashpos = text.rfind('\\')
1575 1575 if slashpos > -1:
1576 1576 s = text[slashpos+1:]
1577 1577 try :
1578 1578 unic = unicodedata.lookup(s)
1579 1579 # allow combining chars
1580 1580 if ('a'+unic).isidentifier():
1581 1581 return '\\'+s,[unic]
1582 1582 except KeyError:
1583 1583 pass
1584 1584 return u'', []
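    # Hedged example (values come from the standard library, not from this
    # module): the lookup above delegates to ``unicodedata``, e.g.
    #
    #     import unicodedata
    #     unicodedata.lookup('GREEK SMALL LETTER ALPHA')   # -> 'α'
    #
    # so ``unicode_name_matches('\\GREEK SMALL LETTER ALPHA')`` is expected to
    # return ('\\GREEK SMALL LETTER ALPHA', ['α']).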
1585 1585
1586 1586
1587 1587 def latex_matches(self, text):
1588 1588 u"""Match Latex syntax for unicode characters.
1589 1589
1590 1590         This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``α``
1591 1591
1592 1592 Used on Python 3 only.
1593 1593 """
1594 1594 slashpos = text.rfind('\\')
1595 1595 if slashpos > -1:
1596 1596 s = text[slashpos:]
1597 1597 if s in latex_symbols:
1598 1598 # Try to complete a full latex symbol to unicode
1599 1599                 # \\alpha -> α
1600 1600 return s, [latex_symbols[s]]
1601 1601 else:
1602 1602 # If a user has partially typed a latex symbol, give them
1603 1603 # a full list of options \al -> [\aleph, \alpha]
1604 1604 matches = [k for k in latex_symbols if k.startswith(s)]
1605 1605 return s, matches
1606 1606 return u'', []
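    # Hedged usage sketch (the exact match lists depend on the
    # ``latex_symbols`` table, so treat the returned values as illustrative):
    #
    #     completer.latex_matches('\\alpha')   # -> ('\\alpha', ['α'])
    #     completer.latex_matches('\\alp')     # -> ('\\alp', ['\\alpha', ...])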
1607 1607
1608 1608 def dispatch_custom_completer(self, text):
1609 1609 if not self.custom_completers:
1610 1610 return
1611 1611
1612 1612 line = self.line_buffer
1613 1613 if not line.strip():
1614 1614 return None
1615 1615
1616 1616 # Create a little structure to pass all the relevant information about
1617 1617 # the current completion to any custom completer.
1618 1618 event = SimpleNamespace()
1619 1619 event.line = line
1620 1620 event.symbol = text
1621 1621 cmd = line.split(None,1)[0]
1622 1622 event.command = cmd
1623 1623 event.text_until_cursor = self.text_until_cursor
1624 1624
1625 1625 # for foo etc, try also to find completer for %foo
1626 1626 if not cmd.startswith(self.magic_escape):
1627 1627 try_magic = self.custom_completers.s_matches(
1628 1628 self.magic_escape + cmd)
1629 1629 else:
1630 1630 try_magic = []
1631 1631
1632 1632 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1633 1633 try_magic,
1634 1634 self.custom_completers.flat_matches(self.text_until_cursor)):
1635 1635 try:
1636 1636 res = c(event)
1637 1637 if res:
1638 1638 # first, try case sensitive match
1639 1639 withcase = [r for r in res if r.startswith(text)]
1640 1640 if withcase:
1641 1641 return withcase
1642 1642 # if none, then case insensitive ones are ok too
1643 1643 text_low = text.lower()
1644 1644 return [r for r in res if r.lower().startswith(text_low)]
1645 1645 except TryNext:
1646 1646 pass
1647 1647
1648 1648 return None
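    # Hedged sketch of how completers end up in ``self.custom_completers``
    # (the function and command names are illustrative; ``set_hook`` is the
    # documented IPython registration entry point):
    #
    #     def apt_completer(self, event):
    #         # ``event.symbol`` is the text being completed,
    #         # ``event.line`` the whole line, ``event.command`` its first word
    #         return ['install', 'remove', 'update', 'upgrade']
    #
    #     get_ipython().set_hook('complete_command', apt_completer,
    #                            str_key='apt')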
1649 1649
1650 1650 def completions(self, text: str, offset: int)->Iterator[Completion]:
1651 1651 """
1652 1652 Returns an iterator over the possible completions
1653 1653
1654 1654 .. warning:: Unstable
1655 1655
1656 1656             This function is unstable, the API may change without warning.
1657 1657             It will also raise unless used in a proper context manager.
1658 1658
1659 1659 Parameters
1660 1660 ----------
1661 1661
1662 1662         text : str
1663 1663             Full text of the current input, as a multiline string.
1664 1664         offset : int
1665 1665             Integer representing the position of the cursor in ``text``. The
1666 1666             offset is 0-based.
1667 1667
1668 1668 Yields
1669 1669 ------
1670 1670 :any:`Completion` object
1671 1671
1672 1672
1673 1673         The cursor in a text can either be seen as being "in between"
1674 1674         characters or "on" a character, depending on the interface visible to
1675 1675         the user. For consistency, a cursor "in between" characters X and Y is
1676 1676         treated as being "on" character Y, that is to say the character the
1677 1677         cursor is on is considered as being after the cursor.
1678 1678
1679 1679         Combining characters may span more than one position in the
1680 1680         text.
1681 1681
1682 1682
1683 1683 .. note::
1684 1684
1685 1685             If ``IPCompleter.debug`` is :any:`True`, this will yield a fake
1686 1686             ``--jedi/ipython--`` Completion token to distinguish completions
1687 1687             returned by Jedi from the usual IPython completions.
1688 1688
1689 1689 .. note::
1690 1690
1691 1691             Completions are not completely deduplicated yet. If identical
1692 1692             completions come from different sources, this function does not
1693 1693             ensure that each completion object is present only once.
1694 1694 """
1695 1695         warnings.warn("`completions` is a provisional API (as of IPython 6.0). "
1696 1696                       "It may change without warning. "
1697 1697                       "Use it in the corresponding context manager.",
1698 1698 category=ProvisionalCompleterWarning, stacklevel=2)
1699 1699
1700 1700 seen = set()
1701 1701 for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
1702 1702 if c and (c in seen):
1703 1703 continue
1704 1704 yield c
1705 1705 seen.add(c)
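    # Hedged usage sketch (the sample text is illustrative): as the warning
    # above says, this provisional API must run inside the
    # ``provisionalcompleter`` context manager, e.g.
    #
    #     ip = get_ipython()
    #     with provisionalcompleter():
    #         comps = list(ip.Completer.completions('myvar.bi', 8))
    #     # each item is a Completion with .start, .end, .text and .type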
1706 1706
1707 1707 def _completions(self, full_text: str, offset: int, *, _timeout)->Iterator[Completion]:
1708 1708 """
1709 1709         Core completion routine. Same signature as :any:`completions`, with the
1710 1710         extra ``_timeout`` parameter (in seconds).
1711 1711
1712 1712
1713 1713         Computing jedi's completion ``.type`` can be quite expensive (it is a
1714 1714         lazy property) and can require some warm-up, more warm-up than just
1715 1715         computing the ``name`` of a completion. The warm-up can be:
1716 1716
1717 1717 - Long warm-up the first time a module is encountered after
1718 1718 install/update: actually build parse/inference tree.
1719 1719
1720 1720 - first time the module is encountered in a session: load tree from
1721 1721 disk.
1722 1722
1723 1723         We don't want to block completions for tens of seconds, so we give the
1724 1724         completer a "budget" of ``_timeout`` seconds per invocation to compute
1725 1725         completion types. The completions whose type has not been computed yet
1726 1726         will be marked as "unknown" and will have a chance to be computed on the
1727 1727         next round, as things get cached.
1728 1728
1729 1729         Keep in mind that Jedi is not the only thing processing the completions,
1730 1730         so keep the timeout short-ish: if we take more than 0.3 seconds we still
1731 1731         have lots of processing to do.
1732 1732
1733 1733 """
1734 1734 deadline = time.monotonic() + _timeout
1735 1735
1736 1736
1737 1737 before = full_text[:offset]
1738 1738 cursor_line, cursor_column = position_to_cursor(full_text, offset)
1739 1739
1740 1740 matched_text, matches, matches_origin, jedi_matches = self._complete(
1741 1741 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column)
1742 1742
1743 1743 iter_jm = iter(jedi_matches)
1744 1744 if _timeout:
1745 1745 for jm in iter_jm:
1746 1746 try:
1747 1747 type_ = jm.type
1748 1748 except Exception:
1749 1749 if self.debug:
1750 1750 print("Error in Jedi getting type of ", jm)
1751 1751 type_ = None
1752 1752 delta = len(jm.name_with_symbols) - len(jm.complete)
1753 1753 yield Completion(start=offset - delta,
1754 1754 end=offset,
1755 1755 text=jm.name_with_symbols,
1756 1756 type=type_,
1757 1757 _origin='jedi')
1758 1758
1759 1759 if time.monotonic() > deadline:
1760 1760 break
1761 1761
1762 1762 for jm in iter_jm:
1763 1763 delta = len(jm.name_with_symbols) - len(jm.complete)
1764 1764 yield Completion(start=offset - delta,
1765 1765 end=offset,
1766 1766 text=jm.name_with_symbols,
1767 1767 type='<unknown>', # don't compute type for speed
1768 1768 _origin='jedi')
1769 1769
1770 1770
1771 1771 start_offset = before.rfind(matched_text)
1772 1772
1773 1773         # TODO:
1774 1774         # Suppress this; right now it is just for debugging.
1775 1775 if jedi_matches and matches and self.debug:
1776 1776 yield Completion(start=start_offset, end=offset, text='--jedi/ipython--', _origin='debug')
1777 1777
1778 1778         # I'm unsure if this is always true, so let's assert and see if it
1779 1779         # crashes
1780 1780 assert before.endswith(matched_text)
1781 1781 for m, t in zip(matches, matches_origin):
1782 1782 yield Completion(start=start_offset, end=offset, text=m, _origin=t)
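    # Illustrative note (a sketch, assuming the behaviour of
    # ``position_to_cursor`` used above): the 0-based ``offset`` is converted
    # into a 0-based line and column, e.g.
    #
    #     full_text = 'a = 1\nb = a.re'
    #     offset = len(full_text)                 # cursor at the very end
    #     # position_to_cursor(full_text, offset) -> (1, 8)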
1783 1783
1784 1784
1785 1785 def complete(self, text=None, line_buffer=None, cursor_pos=None):
1786 1786 """Find completions for the given text and line context.
1787 1787
1788 1788 Note that both the text and the line_buffer are optional, but at least
1789 1789 one of them must be given.
1790 1790
1791 1791 Parameters
1792 1792 ----------
1793 1793 text : string, optional
1794 1794 Text to perform the completion on. If not given, the line buffer
1795 1795 is split using the instance's CompletionSplitter object.
1796 1796
1797 1797 line_buffer : string, optional
1798 1798 If not given, the completer attempts to obtain the current line
1799 1799 buffer via readline. This keyword allows clients which are
1800 1800             requesting text completions in non-readline contexts to inform
1801 1801 the completer of the entire text.
1802 1802
1803 1803 cursor_pos : int, optional
1804 1804             Index of the cursor in the full line buffer. Should be provided by
1805 1805             remote frontends where the kernel has no access to frontend state.
1806 1806
1807 1807 Returns
1808 1808 -------
1809 1809 text : str
1810 1810 Text that was actually used in the completion.
1811 1811
1812 1812 matches : list
1813 1813 A list of completion matches.
1814 1814
1815 1815
1816 1816 .. note::
1817 1817
1818 1818 This API is likely to be deprecated and replaced by
1819 1819 :any:`IPCompleter.completions` in the future.
1820 1820
1821 1821
1822 1822 """
1823 1823 warnings.warn('`Completer.complete` is pending deprecation since '
1824 1824 'IPython 6.0 and will be replaced by `Completer.completions`.',
1825 1825 PendingDeprecationWarning)
1826 1826         # Potential todo: fold the 3rd throwaway argument of _complete
1827 1827         # into the first two.
1828 1828 return self._complete(line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0)[:2]
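    # Hedged usage sketch (the matches shown are plausible, not guaranteed):
    #
    #     text, matches = ip.Completer.complete(line_buffer='import o',
    #                                           cursor_pos=8)
    #     # text == 'o'; matches might include 'os', 'operator', ...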
1829 1829
1830 1830 def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
1831 full_text=None, return_jedi_results=True) -> (str, List[str], List[object]):
1831 full_text=None, return_jedi_results=True) -> Tuple[str, List[str], List[str], Iterable[_FakeJediCompletion]]:
1832 1832 """
1833 1833
1834 1834         Like complete but can also return raw Jedi completions as well as the
1835 1835         origin of the completion text. This could (and should) be made much
1836 1836 cleaner but that will be simpler once we drop the old (and stateful)
1837 1837 :any:`complete` API.
1838 1838
1839 1839
1840 1840         With the current provisional API, cursor_pos acts both (depending on the
1841 1841         caller) as the offset in ``text`` or ``line_buffer``, or as the
1842 1842         ``column`` when passing multiline strings. This could/should be renamed,
1843 1843         but that would add extra noise.
1844 1844 """
1845 1845
1846 1846 # if the cursor position isn't given, the only sane assumption we can
1847 1847 # make is that it's at the end of the line (the common case)
1848 1848 if cursor_pos is None:
1849 1849 cursor_pos = len(line_buffer) if text is None else len(text)
1850 1850
1851 1851 if self.use_main_ns:
1852 1852 self.namespace = __main__.__dict__
1853 1853
1854 1854 # if text is either None or an empty string, rely on the line buffer
1855 1855 if (not line_buffer) and full_text:
1856 1856 line_buffer = full_text.split('\n')[cursor_line]
1857 1857 if not text:
1858 1858 text = self.splitter.split_line(line_buffer, cursor_pos)
1859 1859
1860 1860 if self.backslash_combining_completions:
1861 1861 # allow deactivation of these on windows.
1862 1862 base_text = text if not line_buffer else line_buffer[:cursor_pos]
1863 1863 latex_text, latex_matches = self.latex_matches(base_text)
1864 1864 if latex_matches:
1865 1865 return latex_text, latex_matches, ['latex_matches']*len(latex_matches), ()
1866 1866 name_text = ''
1867 1867 name_matches = []
1868 1868 for meth in (self.unicode_name_matches, back_latex_name_matches, back_unicode_name_matches):
1869 1869 name_text, name_matches = meth(base_text)
1870 1870 if name_text:
1871 return name_text, name_matches, [meth.__qualname__]*len(name_matches), {}
1871 return name_text, name_matches, [meth.__qualname__]*len(name_matches), ()
1872 1872
1873 1873
1874 1874 # If no line buffer is given, assume the input text is all there was
1875 1875 if line_buffer is None:
1876 1876 line_buffer = text
1877 1877
1878 1878 self.line_buffer = line_buffer
1879 1879 self.text_until_cursor = self.line_buffer[:cursor_pos]
1880 1880
1881 1881 # Start with a clean slate of completions
1882 1882 matches = []
1883 1883 custom_res = self.dispatch_custom_completer(text)
1884 1884 # FIXME: we should extend our api to return a dict with completions for
1885 1885 # different types of objects. The rlcomplete() method could then
1886 1886 # simply collapse the dict into a list for readline, but we'd have
1887 1887         # richer completion semantics in other environments.
1888 1888 completions = ()
1889 1889 if self.use_jedi and return_jedi_results:
1890 1890 if not full_text:
1891 1891 full_text = line_buffer
1892 1892 completions = self._jedi_matches(
1893 1893 cursor_pos, cursor_line, full_text)
1894 1894 if custom_res is not None:
1895 1895 # did custom completers produce something?
1896 1896 matches = [(m, 'custom') for m in custom_res]
1897 1897 else:
1898 1898 # Extend the list of completions with the results of each
1899 1899 # matcher, so we return results to the user from all
1900 1900 # namespaces.
1901 1901 if self.merge_completions:
1902 1902 matches = []
1903 1903 for matcher in self.matchers:
1904 1904 try:
1905 1905 matches.extend([(m, matcher.__qualname__)
1906 1906 for m in matcher(text)])
1907 1907 except:
1908 1908 # Show the ugly traceback if the matcher causes an
1909 1909 # exception, but do NOT crash the kernel!
1910 1910 sys.excepthook(*sys.exc_info())
1911 1911 else:
1912 1912 for matcher in self.matchers:
1913 1913 matches = [(m, matcher.__qualname__)
1914 1914 for m in matcher(text)]
1915 1915 if matches:
1916 1916 break
1917 1917 seen = set()
1918 1918 filtered_matches = set()
1919 1919 for m in matches:
1920 1920 t, c = m
1921 1921 if t not in seen:
1922 1922 filtered_matches.add(m)
1923 1923 seen.add(t)
1924 1924
1925 filtered_matches = sorted(
1925 _filtered_matches = sorted(
1926 1926 set(filtered_matches), key=lambda x: completions_sorting_key(x[0]))
1927 1927
1928 matches = [m[0] for m in filtered_matches]
1929 origins = [m[1] for m in filtered_matches]
1928 _matches = [m[0] for m in _filtered_matches]
1929 origins = [m[1] for m in _filtered_matches]
1930 1930
1931 self.matches = matches
1931 self.matches = _matches
1932 1932
1933 return text, matches, origins, completions
1933 return text, _matches, origins, completions
@@ -1,524 +1,510 b''
1 1 # -*- coding: utf-8 -*-
2 2 """IPython Test Process Controller
3 3
4 4 This module runs one or more subprocesses which will actually run the IPython
5 5 test suite.
6 6
7 7 """
8 8
9 9 # Copyright (c) IPython Development Team.
10 10 # Distributed under the terms of the Modified BSD License.
11 11
12 12
13 13 import argparse
14 14 import multiprocessing.pool
15 15 import os
16 16 import stat
17 17 import shutil
18 18 import signal
19 19 import sys
20 20 import subprocess
21 21 import time
22 22
23 23 from .iptest import (
24 24 have, test_group_names as py_test_group_names, test_sections, StreamCapturer,
25 25 )
26 26 from IPython.utils.path import compress_user
27 27 from IPython.utils.py3compat import bytes_to_str
28 28 from IPython.utils.sysinfo import get_sys_info
29 29 from IPython.utils.tempdir import TemporaryDirectory
30 30
31 try:
32 # Python >= 3.3
33 from subprocess import TimeoutExpired
34 def popen_wait(p, timeout):
35 return p.wait(timeout)
36 except ImportError:
37 class TimeoutExpired(Exception):
38 pass
39 def popen_wait(p, timeout):
40 """backport of Popen.wait from Python 3"""
41 for i in range(int(10 * timeout)):
42 if p.poll() is not None:
43 return
44 time.sleep(0.1)
45 if p.poll() is None:
46 raise TimeoutExpired
31 def popen_wait(p, timeout):
32 return p.wait(timeout)
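# Hedged sketch (the process object is illustrative): on Python 3 this simply
# delegates to ``Popen.wait``, so callers can handle a hung subprocess with the
# standard library exception:
#
#     import subprocess
#     try:
#         popen_wait(proc, timeout=5)
#     except subprocess.TimeoutExpired:
#         proc.kill()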
47 33
48 34 class TestController(object):
49 35 """Run tests in a subprocess
50 36 """
51 37 #: str, IPython test suite to be executed.
52 38 section = None
53 39 #: list, command line arguments to be executed
54 40 cmd = None
55 41 #: dict, extra environment variables to set for the subprocess
56 42 env = None
57 43 #: list, TemporaryDirectory instances to clear up when the process finishes
58 44 dirs = None
59 45 #: subprocess.Popen instance
60 46 process = None
61 47 #: str, process stdout+stderr
62 48 stdout = None
63 49
64 50 def __init__(self):
65 51 self.cmd = []
66 52 self.env = {}
67 53 self.dirs = []
68 54
69 55 def setup(self):
70 56 """Create temporary directories etc.
71 57
72 58 This is only called when we know the test group will be run. Things
73 59 created here may be cleaned up by self.cleanup().
74 60 """
75 61 pass
76 62
77 63 def launch(self, buffer_output=False, capture_output=False):
78 64 # print('*** ENV:', self.env) # dbg
79 65 # print('*** CMD:', self.cmd) # dbg
80 66 env = os.environ.copy()
81 67 env.update(self.env)
82 68 if buffer_output:
83 69 capture_output = True
84 70 self.stdout_capturer = c = StreamCapturer(echo=not buffer_output)
85 71 c.start()
86 72 stdout = c.writefd if capture_output else None
87 73 stderr = subprocess.STDOUT if capture_output else None
88 74 self.process = subprocess.Popen(self.cmd, stdout=stdout,
89 75 stderr=stderr, env=env)
90 76
91 77 def wait(self):
92 78 self.process.wait()
93 79 self.stdout_capturer.halt()
94 80 self.stdout = self.stdout_capturer.get_buffer()
95 81 return self.process.returncode
96 82
97 83 def print_extra_info(self):
98 84 """Print extra information about this test run.
99 85
100 86 If we're running in parallel and showing the concise view, this is only
101 87 called if the test group fails. Otherwise, it's called before the test
102 88 group is started.
103 89
104 90 The base implementation does nothing, but it can be overridden by
105 91 subclasses.
106 92 """
107 93 return
108 94
109 95 def cleanup_process(self):
110 96 """Cleanup on exit by killing any leftover processes."""
111 97 subp = self.process
112 98 if subp is None or (subp.poll() is not None):
113 99 return # Process doesn't exist, or is already dead.
114 100
115 101 try:
116 102 print('Cleaning up stale PID: %d' % subp.pid)
117 103 subp.kill()
118 104 except: # (OSError, WindowsError) ?
119 105 # This is just a best effort, if we fail or the process was
120 106 # really gone, ignore it.
121 107 pass
122 108 else:
123 109 for i in range(10):
124 110 if subp.poll() is None:
125 111 time.sleep(0.1)
126 112 else:
127 113 break
128 114
129 115 if subp.poll() is None:
130 116 # The process did not die...
131 117 print('... failed. Manual cleanup may be required.')
132 118
133 119 def cleanup(self):
134 120 "Kill process if it's still alive, and clean up temporary directories"
135 121 self.cleanup_process()
136 122 for td in self.dirs:
137 123 td.cleanup()
138 124
139 125 __del__ = cleanup
140 126
141 127
142 128 class PyTestController(TestController):
143 129 """Run Python tests using IPython.testing.iptest"""
144 130 #: str, Python command to execute in subprocess
145 131 pycmd = None
146 132
147 133 def __init__(self, section, options):
148 134 """Create new test runner."""
149 135 TestController.__init__(self)
150 136 self.section = section
151 137 # pycmd is put into cmd[2] in PyTestController.launch()
152 138 self.cmd = [sys.executable, '-c', None, section]
153 139 self.pycmd = "from IPython.testing.iptest import run_iptest; run_iptest()"
154 140 self.options = options
155 141
156 142 def setup(self):
157 143 ipydir = TemporaryDirectory()
158 144 self.dirs.append(ipydir)
159 145 self.env['IPYTHONDIR'] = ipydir.name
160 146 self.workingdir = workingdir = TemporaryDirectory()
161 147 self.dirs.append(workingdir)
162 148 self.env['IPTEST_WORKING_DIR'] = workingdir.name
163 149 # This means we won't get odd effects from our own matplotlib config
164 150 self.env['MPLCONFIGDIR'] = workingdir.name
165 151 # For security reasons (http://bugs.python.org/issue16202), use
166 152 # a temporary directory to which other users have no access.
167 153 self.env['TMPDIR'] = workingdir.name
168 154
169 155 # Add a non-accessible directory to PATH (see gh-7053)
170 156 noaccess = os.path.join(self.workingdir.name, "_no_access_")
171 157 self.noaccess = noaccess
172 158 os.mkdir(noaccess, 0)
173 159
174 160 PATH = os.environ.get('PATH', '')
175 161 if PATH:
176 162 PATH = noaccess + os.pathsep + PATH
177 163 else:
178 164 PATH = noaccess
179 165 self.env['PATH'] = PATH
180 166
181 167 # From options:
182 168 if self.options.xunit:
183 169 self.add_xunit()
184 170 if self.options.coverage:
185 171 self.add_coverage()
186 172 self.env['IPTEST_SUBPROC_STREAMS'] = self.options.subproc_streams
187 173 self.cmd.extend(self.options.extra_args)
188 174
189 175 def cleanup(self):
190 176 """
191 177 Make the non-accessible directory created in setup() accessible
192 178 again, otherwise deleting the workingdir will fail.
193 179 """
194 180 os.chmod(self.noaccess, stat.S_IRWXU)
195 181 TestController.cleanup(self)
196 182
197 183 @property
198 184 def will_run(self):
199 185 try:
200 186 return test_sections[self.section].will_run
201 187 except KeyError:
202 188 return True
203 189
204 190 def add_xunit(self):
205 191 xunit_file = os.path.abspath(self.section + '.xunit.xml')
206 192 self.cmd.extend(['--with-xunit', '--xunit-file', xunit_file])
207 193
208 194 def add_coverage(self):
209 195 try:
210 196 sources = test_sections[self.section].includes
211 197 except KeyError:
212 198 sources = ['IPython']
213 199
214 200 coverage_rc = ("[run]\n"
215 201 "data_file = {data_file}\n"
216 202 "source =\n"
217 203 " {source}\n"
218 204 ).format(data_file=os.path.abspath('.coverage.'+self.section),
219 205 source="\n ".join(sources))
220 206 config_file = os.path.join(self.workingdir.name, '.coveragerc')
221 207 with open(config_file, 'w') as f:
222 208 f.write(coverage_rc)
223 209
224 210 self.env['COVERAGE_PROCESS_START'] = config_file
225 211 self.pycmd = "import coverage; coverage.process_startup(); " + self.pycmd
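    # Illustrative example (paths and section name are hypothetical): for a
    # 'core' section the generated .coveragerc would look roughly like
    #
    #     [run]
    #     data_file = /current/dir/.coverage.core
    #     source =
    #      IPython.core
    #
    # COVERAGE_PROCESS_START then makes each test subprocess record coverage.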
226 212
227 213 def launch(self, buffer_output=False):
228 214 self.cmd[2] = self.pycmd
229 215 super(PyTestController, self).launch(buffer_output=buffer_output)
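    # Illustrative note (the section name is hypothetical): once ``cmd[2]`` is
    # filled in, the subprocess command for a 'core' group is roughly
    #
    #     [sys.executable, '-c',
    #      'from IPython.testing.iptest import run_iptest; run_iptest()',
    #      'core']
    #
    # plus any xunit/coverage/extra arguments appended in setup().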
230 216
231 217
232 218 def prepare_controllers(options):
233 219 """Returns two lists of TestController instances, those to run, and those
234 220 not to run."""
235 221 testgroups = options.testgroups
236 222 if not testgroups:
237 223 testgroups = py_test_group_names
238 224
239 225 controllers = [PyTestController(name, options) for name in testgroups]
240 226
241 227 to_run = [c for c in controllers if c.will_run]
242 228 not_run = [c for c in controllers if not c.will_run]
243 229 return to_run, not_run
244 230
245 231 def do_run(controller, buffer_output=True):
246 232 """Setup and run a test controller.
247 233
248 234 If buffer_output is True, no output is displayed, to avoid it appearing
249 235 interleaved. In this case, the caller is responsible for displaying test
250 236 output on failure.
251 237
252 238 Returns
253 239 -------
254 240 controller : TestController
255 241 The same controller as passed in, as a convenience for using map() type
256 242 APIs.
257 243 exitcode : int
258 244 The exit code of the test subprocess. Non-zero indicates failure.
259 245 """
260 246 try:
261 247 try:
262 248 controller.setup()
263 249 if not buffer_output:
264 250 controller.print_extra_info()
265 251 controller.launch(buffer_output=buffer_output)
266 252 except Exception:
267 253 import traceback
268 254 traceback.print_exc()
269 255 return controller, 1 # signal failure
270 256
271 257 exitcode = controller.wait()
272 258 return controller, exitcode
273 259
274 260 except KeyboardInterrupt:
275 261 return controller, -signal.SIGINT
276 262 finally:
277 263 controller.cleanup()
278 264
279 265 def report():
280 266 """Return a string with a summary report of test-related variables."""
281 267 inf = get_sys_info()
282 268 out = []
283 269 def _add(name, value):
284 270 out.append((name, value))
285 271
286 272 _add('IPython version', inf['ipython_version'])
287 273 _add('IPython commit', "{} ({})".format(inf['commit_hash'], inf['commit_source']))
288 274 _add('IPython package', compress_user(inf['ipython_path']))
289 275 _add('Python version', inf['sys_version'].replace('\n',''))
290 276 _add('sys.executable', compress_user(inf['sys_executable']))
291 277 _add('Platform', inf['platform'])
292 278
293 279 width = max(len(n) for (n,v) in out)
294 280 out = ["{:<{width}}: {}\n".format(n, v, width=width) for (n,v) in out]
295 281
296 282 avail = []
297 283 not_avail = []
298 284
299 285 for k, is_avail in have.items():
300 286 if is_avail:
301 287 avail.append(k)
302 288 else:
303 289 not_avail.append(k)
304 290
305 291 if avail:
306 292 out.append('\nTools and libraries available at test time:\n')
307 293 avail.sort()
308 294 out.append(' ' + ' '.join(avail)+'\n')
309 295
310 296 if not_avail:
311 297 out.append('\nTools and libraries NOT available at test time:\n')
312 298 not_avail.sort()
313 299 out.append(' ' + ' '.join(not_avail)+'\n')
314 300
315 301 return ''.join(out)
316 302
317 303 def run_iptestall(options):
318 304 """Run the entire IPython test suite by calling nose and trial.
319 305
320 306 This function constructs :class:`IPTester` instances for all IPython
321 307 modules and package and then runs each of them. This causes the modules
322 308 and packages of IPython to be tested each in their own subprocess using
323 309 nose.
324 310
325 311 Parameters
326 312 ----------
327 313
328 314 All parameters are passed as attributes of the options object.
329 315
330 316 testgroups : list of str
331 317 Run only these sections of the test suite. If empty, run all the available
332 318 sections.
333 319
334 320 fast : int or None
335 321 Run the test suite in parallel, using n simultaneous processes. If None
336 322 is passed, one process is used per CPU core. Default 1 (i.e. sequential)
337 323
338 324 inc_slow : bool
339 325 Include slow tests. By default, these tests aren't run.
340 326
341 327 url : unicode
342 328 Address:port to use when running the JS tests.
343 329
344 330 xunit : bool
345 331 Produce Xunit XML output. This is written to multiple foo.xunit.xml files.
346 332
347 333 coverage : bool or str
348 334 Measure code coverage from tests. True will store the raw coverage data,
349 335 or pass 'html' or 'xml' to get reports.
350 336
351 337 extra_args : list
352 338 Extra arguments to pass to the test subprocesses, e.g. '-v'
353 339 """
354 340 to_run, not_run = prepare_controllers(options)
355 341
356 342 def justify(ltext, rtext, width=70, fill='-'):
357 343 ltext += ' '
358 344 rtext = (' ' + rtext).rjust(width - len(ltext), fill)
359 345 return ltext + rtext
360 346
361 347 # Run all test runners, tracking execution time
362 348 failed = []
363 349 t_start = time.time()
364 350
365 351 print()
366 352 if options.fast == 1:
367 353 # This actually means sequential, i.e. with 1 job
368 354 for controller in to_run:
369 355 print('Test group:', controller.section)
370 356 sys.stdout.flush() # Show in correct order when output is piped
371 357 controller, res = do_run(controller, buffer_output=False)
372 358 if res:
373 359 failed.append(controller)
374 360 if res == -signal.SIGINT:
375 361 print("Interrupted")
376 362 break
377 363 print()
378 364
379 365 else:
380 366 # Run tests concurrently
381 367 try:
382 368 pool = multiprocessing.pool.ThreadPool(options.fast)
383 369 for (controller, res) in pool.imap_unordered(do_run, to_run):
384 370 res_string = 'OK' if res == 0 else 'FAILED'
385 371 print(justify('Test group: ' + controller.section, res_string))
386 372 if res:
387 373 controller.print_extra_info()
388 374 print(bytes_to_str(controller.stdout))
389 375 failed.append(controller)
390 376 if res == -signal.SIGINT:
391 377 print("Interrupted")
392 378 break
393 379 except KeyboardInterrupt:
394 380 return
395 381
396 382 for controller in not_run:
397 383 print(justify('Test group: ' + controller.section, 'NOT RUN'))
398 384
399 385 t_end = time.time()
400 386 t_tests = t_end - t_start
401 387 nrunners = len(to_run)
402 388 nfail = len(failed)
403 389 # summarize results
404 390 print('_'*70)
405 391 print('Test suite completed for system with the following information:')
406 392 print(report())
407 393 took = "Took %.3fs." % t_tests
408 394 print('Status: ', end='')
409 395 if not failed:
410 396 print('OK (%d test groups).' % nrunners, took)
411 397 else:
412 398 # If anything went wrong, point out what command to rerun manually to
413 399 # see the actual errors and individual summary
414 400 failed_sections = [c.section for c in failed]
415 401 print('ERROR - {} out of {} test groups failed ({}).'.format(nfail,
416 402 nrunners, ', '.join(failed_sections)), took)
417 403 print()
418 404 print('You may wish to rerun these, with:')
419 405 print(' iptest', *failed_sections)
420 406 print()
421 407
422 408 if options.coverage:
423 409 from coverage import coverage, CoverageException
424 410 cov = coverage(data_file='.coverage')
425 411 cov.combine()
426 412 cov.save()
427 413
428 414 # Coverage HTML report
429 415 if options.coverage == 'html':
430 416 html_dir = 'ipy_htmlcov'
431 417 shutil.rmtree(html_dir, ignore_errors=True)
432 418 print("Writing HTML coverage report to %s/ ... " % html_dir, end="")
433 419 sys.stdout.flush()
434 420
435 421 # Custom HTML reporter to clean up module names.
436 422 from coverage.html import HtmlReporter
437 423 class CustomHtmlReporter(HtmlReporter):
438 424 def find_code_units(self, morfs):
439 425 super(CustomHtmlReporter, self).find_code_units(morfs)
440 426 for cu in self.code_units:
441 427 nameparts = cu.name.split(os.sep)
442 428 if 'IPython' not in nameparts:
443 429 continue
444 430 ix = nameparts.index('IPython')
445 431 cu.name = '.'.join(nameparts[ix:])
446 432
447 433 # Reimplement the html_report method with our custom reporter
448 434 cov.get_data()
449 435 cov.config.from_args(omit='*{0}tests{0}*'.format(os.sep), html_dir=html_dir,
450 436 html_title='IPython test coverage',
451 437 )
452 438 reporter = CustomHtmlReporter(cov, cov.config)
453 439 reporter.report(None)
454 440 print('done.')
455 441
456 442 # Coverage XML report
457 443 elif options.coverage == 'xml':
458 444 try:
459 445 cov.xml_report(outfile='ipy_coverage.xml')
460 446 except CoverageException as e:
461 447 print('Generating coverage report failed. Are you running javascript tests only?')
462 448 import traceback
463 449 traceback.print_exc()
464 450
465 451 if failed:
466 452 # Ensure that our exit code indicates failure
467 453 sys.exit(1)
468 454
469 455 argparser = argparse.ArgumentParser(description='Run IPython test suite')
470 456 argparser.add_argument('testgroups', nargs='*',
471 457 help='Run specified groups of tests. If omitted, run '
472 458 'all tests.')
473 459 argparser.add_argument('--all', action='store_true',
474 460 help='Include slow tests not run by default.')
475 461 argparser.add_argument('--url', help="URL to use for the JS tests.")
476 462 argparser.add_argument('-j', '--fast', nargs='?', const=None, default=1, type=int,
477 463 help='Run test sections in parallel. This starts as many '
478 464 'processes as you have cores, or you can specify a number.')
479 465 argparser.add_argument('--xunit', action='store_true',
480 466 help='Produce Xunit XML results')
481 467 argparser.add_argument('--coverage', nargs='?', const=True, default=False,
482 468 help="Measure test coverage. Specify 'html' or "
483 469 "'xml' to get reports.")
484 470 argparser.add_argument('--subproc-streams', default='capture',
485 471 help="What to do with stdout/stderr from subprocesses. "
486 472 "'capture' (default), 'show' and 'discard' are the options.")
487 473
488 474 def default_options():
489 475 """Get an argparse Namespace object with the default arguments, to pass to
490 476 :func:`run_iptestall`.
491 477 """
492 478 options = argparser.parse_args([])
493 479 options.extra_args = []
494 480 return options
495 481
496 482 def main():
497 483 # iptest doesn't work correctly if the working directory is the
498 484 # root of the IPython source tree. Tell the user to avoid
499 485 # frustration.
500 486 if os.path.exists(os.path.join(os.getcwd(),
501 487 'IPython', 'testing', '__main__.py')):
502 488 print("Don't run iptest from the IPython source directory",
503 489 file=sys.stderr)
504 490 sys.exit(1)
505 491 # Arguments after -- should be passed through to nose. Argparse treats
506 492 # everything after -- as regular positional arguments, so we separate them
507 493 # first.
508 494 try:
509 495 ix = sys.argv.index('--')
510 496 except ValueError:
511 497 to_parse = sys.argv[1:]
512 498 extra_args = []
513 499 else:
514 500 to_parse = sys.argv[1:ix]
515 501 extra_args = sys.argv[ix+1:]
516 502
517 503 options = argparser.parse_args(to_parse)
518 504 options.extra_args = extra_args
519 505
520 506 run_iptestall(options)
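# Illustrative sketch (the argv values are hypothetical): with
#
#     sys.argv == ['iptest', 'IPython.core', '--', '-v']
#
# the split above yields to_parse == ['IPython.core'] and
# extra_args == ['-v'], so '-v' is forwarded to the test subprocesses.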
521 507
522 508
523 509 if __name__ == '__main__':
524 510 main()