Merge pull request #13202 from Kojoley/fix-unintentional-skipping-of-module-level-doctests...
Matthias Bussonnier -
r26876:a05271cc merge
@@ -1,2239 +1,2239 b''
1 1 """Completion for IPython.
2 2
3 3 This module started as a fork of the rlcompleter module in the Python standard
4 4 library. The original enhancements made to rlcompleter have been sent
5 5 upstream and were accepted as of Python 2.3.
6 6
7 7 This module now supports a wide variety of completion mechanisms, available
8 8 both for normal classic Python code and for IPython-specific syntax
9 9 such as magics.
10 10
11 11 Latex and Unicode completion
12 12 ============================
13 13
14 14 IPython and compatible frontends can not only complete your code, but can also
15 15 help you input a wide range of characters. In particular we allow you to insert
16 16 a unicode character using the tab completion mechanism.
17 17
18 18 Forward latex/unicode completion
19 19 --------------------------------
20 20
21 21 Forward completion allows you to easily type a unicode character using its latex
22 22 name, or its unicode long description. To do so, type a backslash followed by the
23 23 relevant name and press tab:
24 24
25 25
26 26 Using latex completion:
27 27
28 28 .. code::
29 29
30 30 \\alpha<tab>
31 31 α
32 32
33 33 or using unicode completion:
34 34
35 35
36 36 .. code::
37 37
38 38 \\GREEK SMALL LETTER ALPHA<tab>
39 39 α
40 40
41 41
42 42 Only valid Python identifiers will complete. Combining characters (like arrows or
43 43 dots) are also available; unlike latex, they need to be put after their
44 44 counterpart, that is to say, `F\\\\vec<tab>` is correct, not `\\\\vec<tab>F`.
45 45
46 46 Some browsers are known to display combining characters incorrectly.
47 47
48 48 Backward latex completion
49 49 -------------------------
50 50
51 51 It is sometimes challenging to know how to type a character. If you are using
52 52 IPython, or any compatible frontend, you can prepend a backslash to the character
53 53 and press `<tab>` to expand it to its latex form.
54 54
55 55 .. code::
56 56
57 57 \\α<tab>
58 58 \\alpha
59 59
60 60
61 61 Both forward and backward completions can be deactivated by setting the
62 62 ``Completer.backslash_combining_completions`` option to ``False``.
63 63
64 64
65 65 Experimental
66 66 ============
67 67
68 68 Starting with IPython 6.0, this module can make use of the Jedi library to
69 69 generate completions both by using static analysis of the code and by dynamically
70 70 inspecting multiple namespaces. Jedi is an autocompletion and static analysis
71 71 library for Python. The APIs attached to this new mechanism are unstable and will
72 72 raise unless used in a :any:`provisionalcompleter` context manager.
73 73
74 74 You will find that the following are experimental:
75 75
76 76 - :any:`provisionalcompleter`
77 77 - :any:`IPCompleter.completions`
78 78 - :any:`Completion`
79 79 - :any:`rectify_completions`
80 80
81 81 .. note::
82 82
83 83 better name for :any:`rectify_completions` ?
84 84
85 85 We welcome any feedback on these new APIs, and we also encourage you to try this
86 86 module in debug mode (start IPython with ``--Completer.debug=True``) in order
87 87 to have extra logging information if :any:`jedi` is crashing, or if the current
88 88 IPython completer pending deprecations are returning results not yet handled
89 89 by :any:`jedi`.
90 90
91 91 Using Jedi for tab completion allows snippets like the following to work without
92 92 having to execute any code:
93 93
94 94 >>> myvar = ['hello', 42]
95 95 ... myvar[1].bi<tab>
96 96
97 97 Tab completion will be able to infer that ``myvar[1]`` is an integer without
98 98 executing any code, unlike the previously available ``IPCompleter.greedy``
99 99 option.
100 100
101 101 Be sure to update :any:`jedi` to the latest stable version or to try the
102 102 current development version to get better completions.
103 103 """
104 104
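# Editor's illustration of the provisional API described in the docstring above
# (not part of the original file). It assumes an interactive session where
# ``ip = get_ipython()`` and ``myvar`` from the docstring example is defined;
# calls must be wrapped in :any:`provisionalcompleter` or they raise
# ProvisionalCompleterWarning:
#
#     >>> ip = get_ipython()
#     >>> with provisionalcompleter():
#     ...     for comp in ip.Completer.completions('myvar[1].bi', 11):
#     ...         print(comp.text, comp.type)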
105 105
106 106 # Copyright (c) IPython Development Team.
107 107 # Distributed under the terms of the Modified BSD License.
108 108 #
109 109 # Some of this code originated from rlcompleter in the Python standard library
110 110 # Copyright (C) 2001 Python Software Foundation, www.python.org
111 111
112 112
113 113 import builtins as builtin_mod
114 114 import glob
115 115 import inspect
116 116 import itertools
117 117 import keyword
118 118 import os
119 119 import re
120 120 import string
121 121 import sys
122 122 import time
123 123 import unicodedata
124 124 import uuid
125 125 import warnings
126 126 from contextlib import contextmanager
127 127 from importlib import import_module
128 128 from types import SimpleNamespace
129 129 from typing import Iterable, Iterator, List, Tuple, Union, Any, Sequence, Dict, NamedTuple, Pattern, Optional
130 130
131 131 from IPython.core.error import TryNext
132 132 from IPython.core.inputtransformer2 import ESC_MAGIC
133 133 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
134 134 from IPython.core.oinspect import InspectColors
135 135 from IPython.utils import generics
136 136 from IPython.utils.dir2 import dir2, get_real_method
137 137 from IPython.utils.path import ensure_dir_exists
138 138 from IPython.utils.process import arg_split
139 139 from traitlets import Bool, Enum, Int, List as ListTrait, Unicode, default, observe
140 140 from traitlets.config.configurable import Configurable
141 141
142 142 import __main__
143 143
144 144 # skip module doctests
145 skip_doctest = True
145 __skip_doctest__ = True
146 146
147 147 try:
148 148 import jedi
149 149 jedi.settings.case_insensitive_completion = False
150 150 import jedi.api.helpers
151 151 import jedi.api.classes
152 152 JEDI_INSTALLED = True
153 153 except ImportError:
154 154 JEDI_INSTALLED = False
155 155 #-----------------------------------------------------------------------------
156 156 # Globals
157 157 #-----------------------------------------------------------------------------
158 158
159 159 # Ranges where we have most of the valid unicode names. We could be finer
160 160 # grained, but is it worth it for performance? While unicode has characters in the
161 161 # range 0 to 0x110000, we seem to have names for only about 10% of those (131808 as I
162 162 # write this). With the ranges below we cover them all, with a density of ~67%;
163 163 # the biggest next gap we could consider only adds about 1% density and there are 600
164 164 # gaps that would need hard coding.
165 165 _UNICODE_RANGES = [(32, 0x3134b), (0xe0001, 0xe01f0)]
166 166
167 167 # Public API
168 168 __all__ = ['Completer','IPCompleter']
169 169
170 170 if sys.platform == 'win32':
171 171 PROTECTABLES = ' '
172 172 else:
173 173 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
174 174
175 175 # Protect against returning an enormous number of completions which the frontend
176 176 # may have trouble processing.
177 177 MATCHES_LIMIT = 500
178 178
179 179 _deprecation_readline_sentinel = object()
180 180
181 181
182 182 class ProvisionalCompleterWarning(FutureWarning):
183 183 """
184 184 Exception raise by an experimental feature in this module.
185 185
186 186 Wrap code in a :any:`provisionalcompleter` context manager if you
187 187 are certain you want to use an unstable feature.
188 188 """
189 189 pass
190 190
191 191 warnings.filterwarnings('error', category=ProvisionalCompleterWarning)
192 192
193 193 @contextmanager
194 194 def provisionalcompleter(action='ignore'):
195 195 """
196 196 This context manager has to be used in any place where unstable completer
197 197 behavior or APIs may be invoked.
198 198
199 199 >>> with provisionalcompleter():
200 200 ... completer.do_experimental_things() # works
201 201
202 202 >>> completer.do_experimental_things() # raises.
203 203
204 204 .. note::
205 205
206 206 Unstable
207 207
208 208 By using this context manager you agree that the API in use may change
209 209 without warning, and that you won't complain if it does so.
210 210
211 211 You also understand that, if the API is not to your liking, you should report
212 212 a bug to explain your use case upstream.
213 213
214 214 We'll be happy to get your feedback, feature requests, and improvements on
215 215 any of the unstable APIs!
216 216 """
217 217 with warnings.catch_warnings():
218 218 warnings.filterwarnings(action, category=ProvisionalCompleterWarning)
219 219 yield
220 220
221 221
222 222 def has_open_quotes(s):
223 223 """Return whether a string has open quotes.
224 224
225 225 This simply checks whether the number of quote characters of either type in
226 226 the string is odd.
227 227
228 228 Returns
229 229 -------
230 230 If there is an open quote, the quote character is returned. Else, return
231 231 False.
232 232 """
233 233 # We check " first, then ', so complex cases with nested quotes will get
234 234 # the " to take precedence.
235 235 if s.count('"') % 2:
236 236 return '"'
237 237 elif s.count("'") % 2:
238 238 return "'"
239 239 else:
240 240 return False
241 241
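# Illustration added by the editor, based on the function above: an odd count
# of either quote character is reported as the "open" quote.
#
#     >>> has_open_quotes('print("hello')
#     '"'
#     >>> has_open_quotes("it's")
#     "'"
#     >>> has_open_quotes('"closed"')
#     False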
242 242
243 243 def protect_filename(s, protectables=PROTECTABLES):
244 244 """Escape a string to protect certain characters."""
245 245 if set(s) & set(protectables):
246 246 if sys.platform == "win32":
247 247 return '"' + s + '"'
248 248 else:
249 249 return "".join(("\\" + c if c in protectables else c) for c in s)
250 250 else:
251 251 return s
252 252
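# Editor's illustration of protect_filename(): on POSIX, protectable characters
# are backslash-escaped; on win32 the whole name would instead be quoted,
# yielding '"my file.txt"'.
#
#     >>> protect_filename('my file.txt')
#     'my\\ file.txt'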
253 253
254 254 def expand_user(path:str) -> Tuple[str, bool, str]:
255 255 """Expand ``~``-style usernames in strings.
256 256
257 257 This is similar to :func:`os.path.expanduser`, but it computes and returns
258 258 extra information that will be useful if the input was being used in
259 259 computing completions, and you wish to return the completions with the
260 260 original '~' instead of its expanded value.
261 261
262 262 Parameters
263 263 ----------
264 264 path : str
265 265 String to be expanded. If no ~ is present, the output is the same as the
266 266 input.
267 267
268 268 Returns
269 269 -------
270 270 newpath : str
271 271 Result of ~ expansion in the input path.
272 272 tilde_expand : bool
273 273 Whether any expansion was performed or not.
274 274 tilde_val : str
275 275 The value that ~ was replaced with.
276 276 """
277 277 # Default values
278 278 tilde_expand = False
279 279 tilde_val = ''
280 280 newpath = path
281 281
282 282 if path.startswith('~'):
283 283 tilde_expand = True
284 284 rest = len(path)-1
285 285 newpath = os.path.expanduser(path)
286 286 if rest:
287 287 tilde_val = newpath[:-rest]
288 288 else:
289 289 tilde_val = newpath
290 290
291 291 return newpath, tilde_expand, tilde_val
292 292
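# Editor's illustration of expand_user(), assuming ``~`` expands to
# ``/home/user`` on this machine (the actual value is platform dependent):
#
#     >>> expand_user('~/ipython')
#     ('/home/user/ipython', True, '/home/user')
#     >>> expand_user('no_tilde_here')
#     ('no_tilde_here', False, '')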
293 293
294 294 def compress_user(path:str, tilde_expand:bool, tilde_val:str) -> str:
295 295 """Does the opposite of expand_user, with its outputs.
296 296 """
297 297 if tilde_expand:
298 298 return path.replace(tilde_val, '~')
299 299 else:
300 300 return path
301 301
302 302
303 303 def completions_sorting_key(word):
304 304 """key for sorting completions
305 305
306 306 This does several things:
307 307
308 308 - Demote any completions starting with underscores to the end
309 309 - Insert any %magic and %%cellmagic completions into the alphabetical order
310 310 by their name
311 311 """
312 312 prio1, prio2 = 0, 0
313 313
314 314 if word.startswith('__'):
315 315 prio1 = 2
316 316 elif word.startswith('_'):
317 317 prio1 = 1
318 318
319 319 if word.endswith('='):
320 320 prio1 = -1
321 321
322 322 if word.startswith('%%'):
323 323 # If there's another % in there, this is something else, so leave it alone
324 324 if not "%" in word[2:]:
325 325 word = word[2:]
326 326 prio2 = 2
327 327 elif word.startswith('%'):
328 328 if not "%" in word[1:]:
329 329 word = word[1:]
330 330 prio2 = 1
331 331
332 332 return prio1, word, prio2
333 333
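# Editor's illustration of the sorting key above: magics are ordered by their
# bare name among ordinary words, while single and double underscore names are
# demoted to the end.
#
#     >>> sorted(['_private', 'apple', '%%timeit', '__dunder', '%cd'],
#     ...        key=completions_sorting_key)
#     ['apple', '%cd', '%%timeit', '_private', '__dunder']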
334 334
335 335 class _FakeJediCompletion:
336 336 """
337 337 This is a workaround to communicate to the UI that Jedi has crashed and to
338 338 report a bug. Will be used only if :any:`IPCompleter.debug` is set to true.
339 339 
340 340 Added in IPython 6.0, so it should likely be removed for 7.0.
341 341
342 342 """
343 343
344 344 def __init__(self, name):
345 345
346 346 self.name = name
347 347 self.complete = name
348 348 self.type = 'crashed'
349 349 self.name_with_symbols = name
350 350 self.signature = ''
351 351 self._origin = 'fake'
352 352
353 353 def __repr__(self):
354 354 return '<Fake completion object jedi has crashed>'
355 355
356 356
357 357 class Completion:
358 358 """
359 359 Completion object used and returned by IPython completers.
360 360
361 361 .. warning::
362 362
363 363 Unstable
364 364
365 365 This function is unstable, API may change without warning.
366 366 It will also raise unless used in the proper context manager.
367 367
368 368 This acts as a middle-ground :any:`Completion` object between the
369 369 :any:`jedi.api.classes.Completion` object and the Prompt Toolkit completion
370 370 object. While Jedi needs a lot of information about the evaluator and how the
371 371 code should be run/inspected, PromptToolkit (and other frontends) mostly
372 372 need user-facing information.
373 373
374 374 - Which range should be replaced by what.
375 375 - Some metadata (like completion type), or meta information to be displayed to
376 376 the user.
377 377
378 378 For debugging purposes we can also store the origin of the completion (``jedi``,
379 379 ``IPython.python_matches``, ``IPython.magics_matches``...).
380 380 """
381 381
382 382 __slots__ = ['start', 'end', 'text', 'type', 'signature', '_origin']
383 383
384 384 def __init__(self, start: int, end: int, text: str, *, type: str=None, _origin='', signature='') -> None:
385 385 warnings.warn("``Completion`` is a provisional API (as of IPython 6.0). "
386 386 "It may change without warnings. "
387 387 "Use in corresponding context manager.",
388 388 category=ProvisionalCompleterWarning, stacklevel=2)
389 389
390 390 self.start = start
391 391 self.end = end
392 392 self.text = text
393 393 self.type = type
394 394 self.signature = signature
395 395 self._origin = _origin
396 396
397 397 def __repr__(self):
398 398 return '<Completion start=%s end=%s text=%r type=%r, signature=%r,>' % \
399 399 (self.start, self.end, self.text, self.type or '?', self.signature or '?')
400 400
401 401 def __eq__(self, other)->bool:
402 402 """
403 403 Equality and hash do not hash the type (as some completers may not be
404 404 able to infer the type), but are used to (partially) de-duplicate
405 405 completions.
406 406 
407 407 Completely de-duplicating completions is a bit trickier than just
408 408 comparing, as it depends on surrounding text, which Completions are not
409 409 aware of.
410 410 """
411 411 return self.start == other.start and \
412 412 self.end == other.end and \
413 413 self.text == other.text
414 414
415 415 def __hash__(self):
416 416 return hash((self.start, self.end, self.text))
417 417
418 418
419 419 _IC = Iterable[Completion]
420 420
421 421
422 422 def _deduplicate_completions(text: str, completions: _IC)-> _IC:
423 423 """
424 424 Deduplicate a set of completions.
425 425
426 426 .. warning::
427 427
428 428 Unstable
429 429
430 430 This function is unstable, API may change without warning.
431 431
432 432 Parameters
433 433 ----------
434 434 text : str
435 435 text that should be completed.
436 436 completions : Iterator[Completion]
437 437 iterator over the completions to deduplicate
438 438
439 439 Yields
440 440 ------
441 441 `Completions` objects
442 442 Completions coming from multiple sources may be different but end up having
443 443 the same effect when applied to ``text``. If this is the case, this will
444 444 consider the completions as equal and only emit the first one encountered.
445 445 Not folded into `completions()` yet for debugging purposes, and to detect when
446 446 the IPython completer does return things that Jedi does not, but it should be
447 447 at some point.
448 448 """
449 449 completions = list(completions)
450 450 if not completions:
451 451 return
452 452
453 453 new_start = min(c.start for c in completions)
454 454 new_end = max(c.end for c in completions)
455 455
456 456 seen = set()
457 457 for c in completions:
458 458 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
459 459 if new_text not in seen:
460 460 yield c
461 461 seen.add(new_text)
462 462
463 463
464 464 def rectify_completions(text: str, completions: _IC, *, _debug=False)->_IC:
465 465 """
466 466 Rectify a set of completions to all have the same ``start`` and ``end``
467 467
468 468 .. warning::
469 469
470 470 Unstable
471 471
472 472 This function is unstable, API may change without warning.
473 473 It will also raise unless used in the proper context manager.
474 474
475 475 Parameters
476 476 ----------
477 477 text : str
478 478 text that should be completed.
479 479 completions : Iterator[Completion]
480 480 iterator over the completions to rectify
481 481
482 482 Notes
483 483 -----
484 484 :any:`jedi.api.classes.Completion` objects returned by Jedi may not have the same start and end, though
485 485 the Jupyter Protocol requires them to behave like so. This will readjust
486 486 the completions to have the same ``start`` and ``end`` by padding both
487 487 extremities with surrounding text.
488 488 
489 489 During stabilisation this should support a ``_debug`` option to log which
490 490 completions are returned by the IPython completer and not found in Jedi, in
491 491 order to make upstream bug reports.
492 492 """
493 493 warnings.warn("`rectify_completions` is a provisional API (as of IPython 6.0). "
494 494 "It may change without warnings. "
495 495 "Use in corresponding context manager.",
496 496 category=ProvisionalCompleterWarning, stacklevel=2)
497 497
498 498 completions = list(completions)
499 499 if not completions:
500 500 return
501 501 starts = (c.start for c in completions)
502 502 ends = (c.end for c in completions)
503 503
504 504 new_start = min(starts)
505 505 new_end = max(ends)
506 506
507 507 seen_jedi = set()
508 508 seen_python_matches = set()
509 509 for c in completions:
510 510 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
511 511 if c._origin == 'jedi':
512 512 seen_jedi.add(new_text)
513 513 elif c._origin == 'IPCompleter.python_matches':
514 514 seen_python_matches.add(new_text)
515 515 yield Completion(new_start, new_end, new_text, type=c.type, _origin=c._origin, signature=c.signature)
516 516 diff = seen_python_matches.difference(seen_jedi)
517 517 if diff and _debug:
518 518 print('IPython.python matches have extras:', diff)
519 519
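# Editor's sketch of rectify_completions() (not part of the original file):
# both completions below end up sharing start=0 and end=2, with the second one
# padded with the surrounding text it did not originally cover.
#
#     >>> with provisionalcompleter():
#     ...     cs = [Completion(start=0, end=2, text='foo'),
#     ...           Completion(start=1, end=2, text='bar')]
#     ...     [c.text for c in rectify_completions('fb', cs)]
#     ['foo', 'fbar']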
520 520
521 521 if sys.platform == 'win32':
522 522 DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
523 523 else:
524 524 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
525 525
526 526 GREEDY_DELIMS = ' =\r\n'
527 527
528 528
529 529 class CompletionSplitter(object):
530 530 """An object to split an input line in a manner similar to readline.
531 531
532 532 By having our own implementation, we can expose readline-like completion in
533 533 a uniform manner to all frontends. This object only needs to be given the
534 534 line of text to be split and the cursor position on said line, and it
535 535 returns the 'word' to be completed on at the cursor after splitting the
536 536 entire line.
537 537
538 538 What characters are used as splitting delimiters can be controlled by
539 539 setting the ``delims`` attribute (this is a property that internally
540 540 automatically builds the necessary regular expression)"""
541 541
542 542 # Private interface
543 543
544 544 # A string of delimiter characters. The default value makes sense for
545 545 # IPython's most typical usage patterns.
546 546 _delims = DELIMS
547 547
548 548 # The expression (a normal string) to be compiled into a regular expression
549 549 # for actual splitting. We store it as an attribute mostly for ease of
550 550 # debugging, since this type of code can be so tricky to debug.
551 551 _delim_expr = None
552 552
553 553 # The regular expression that does the actual splitting
554 554 _delim_re = None
555 555
556 556 def __init__(self, delims=None):
557 557 delims = CompletionSplitter._delims if delims is None else delims
558 558 self.delims = delims
559 559
560 560 @property
561 561 def delims(self):
562 562 """Return the string of delimiter characters."""
563 563 return self._delims
564 564
565 565 @delims.setter
566 566 def delims(self, delims):
567 567 """Set the delimiters for line splitting."""
568 568 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
569 569 self._delim_re = re.compile(expr)
570 570 self._delims = delims
571 571 self._delim_expr = expr
572 572
573 573 def split_line(self, line, cursor_pos=None):
574 574 """Split a line of text with a cursor at the given position.
575 575 """
576 576 l = line if cursor_pos is None else line[:cursor_pos]
577 577 return self._delim_re.split(l)[-1]
578 578
579 579
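# Editor's illustration of CompletionSplitter: the default delimiters include
# spaces and parentheses but not '.', so attribute chains stay intact.
#
#     >>> CompletionSplitter().split_line('run print(foo.ba')
#     'foo.ba'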
580 580
581 581 class Completer(Configurable):
582 582
583 583 greedy = Bool(False,
584 584 help="""Activate greedy completion
585 585 PENDING DEPRECATION. This is now mostly taken care of with Jedi.
586 586
587 587 This will enable completion on elements of lists, results of function calls, etc.,
588 588 but can be unsafe because the code is actually evaluated on TAB.
589 589 """
590 590 ).tag(config=True)
591 591
592 592 use_jedi = Bool(default_value=JEDI_INSTALLED,
593 593 help="Experimental: Use Jedi to generate autocompletions. "
594 594 "Default to True if jedi is installed.").tag(config=True)
595 595
596 596 jedi_compute_type_timeout = Int(default_value=400,
597 597 help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
598 598 Set to 0 to stop computing types. A non-zero value lower than 100ms may hurt
599 599 performance by preventing jedi from building its cache.
600 600 """).tag(config=True)
601 601
602 602 debug = Bool(default_value=False,
603 603 help='Enable debug for the Completer. Mostly print extra '
604 604 'information for experimental jedi integration.')\
605 605 .tag(config=True)
606 606
607 607 backslash_combining_completions = Bool(True,
608 608 help="Enable unicode completions, e.g. \\alpha<tab> . "
609 609 "Includes completion of latex commands, unicode names, and expanding "
610 610 "unicode characters back to latex commands.").tag(config=True)
611 611
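# Illustrative configuration sketch added by the editor: the traits above can
# be set from an IPython configuration file (or the command line), e.g.::
#
#     c.IPCompleter.use_jedi = False
#     c.IPCompleter.jedi_compute_type_timeout = 800
#     c.Completer.backslash_combining_completions = False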
612 612
613 613
614 614 def __init__(self, namespace=None, global_namespace=None, **kwargs):
615 615 """Create a new completer for the command line.
616 616
617 617 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
618 618
619 619 If unspecified, the default namespace where completions are performed
620 620 is __main__ (technically, __main__.__dict__). Namespaces should be
621 621 given as dictionaries.
622 622
623 623 An optional second namespace can be given. This allows the completer
624 624 to handle cases where both the local and global scopes need to be
625 625 distinguished.
626 626 """
627 627
628 628 # Don't bind to namespace quite yet, but flag whether the user wants a
629 629 # specific namespace or to use __main__.__dict__. This will allow us
630 630 # to bind to __main__.__dict__ at completion time, not now.
631 631 if namespace is None:
632 632 self.use_main_ns = True
633 633 else:
634 634 self.use_main_ns = False
635 635 self.namespace = namespace
636 636
637 637 # The global namespace, if given, can be bound directly
638 638 if global_namespace is None:
639 639 self.global_namespace = {}
640 640 else:
641 641 self.global_namespace = global_namespace
642 642
643 643 self.custom_matchers = []
644 644
645 645 super(Completer, self).__init__(**kwargs)
646 646
647 647 def complete(self, text, state):
648 648 """Return the next possible completion for 'text'.
649 649
650 650 This is called successively with state == 0, 1, 2, ... until it
651 651 returns None. The completion should begin with 'text'.
652 652
653 653 """
654 654 if self.use_main_ns:
655 655 self.namespace = __main__.__dict__
656 656
657 657 if state == 0:
658 658 if "." in text:
659 659 self.matches = self.attr_matches(text)
660 660 else:
661 661 self.matches = self.global_matches(text)
662 662 try:
663 663 return self.matches[state]
664 664 except IndexError:
665 665 return None
666 666
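# Editor's illustration of the readline-style state protocol implemented by
# complete(): it is called with state 0, 1, 2, ... until it returns None.
#
#     >>> c = Completer(namespace={'my_list': [], 'my_dict': {}})
#     >>> c.complete('my_', 0)
#     'my_list'
#     >>> c.complete('my_', 1)
#     'my_dict'
#     >>> c.complete('my_', 2) is None
#     True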
667 667 def global_matches(self, text):
668 668 """Compute matches when text is a simple name.
669 669
670 670 Return a list of all keywords, built-in functions and names currently
671 671 defined in self.namespace or self.global_namespace that match.
672 672
673 673 """
674 674 matches = []
675 675 match_append = matches.append
676 676 n = len(text)
677 677 for lst in [keyword.kwlist,
678 678 builtin_mod.__dict__.keys(),
679 679 self.namespace.keys(),
680 680 self.global_namespace.keys()]:
681 681 for word in lst:
682 682 if word[:n] == text and word != "__builtins__":
683 683 match_append(word)
684 684
685 685 snake_case_re = re.compile(r"[^_]+(_[^_]+)+?\Z")
686 686 for lst in [self.namespace.keys(),
687 687 self.global_namespace.keys()]:
688 688 shortened = {"_".join([sub[0] for sub in word.split('_')]) : word
689 689 for word in lst if snake_case_re.match(word)}
690 690 for word in shortened.keys():
691 691 if word[:n] == text and word != "__builtins__":
692 692 match_append(shortened[word])
693 693 return matches
694 694
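# Editor's illustration of global_matches(), including the snake_case
# abbreviation matching done by the second loop above ('m_l' matches the
# shortened form of 'my_long_name'):
#
#     >>> c = Completer(namespace={'my_long_name': 1, 'other': 2})
#     >>> c.global_matches('my')
#     ['my_long_name']
#     >>> c.global_matches('m_l')
#     ['my_long_name']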
695 695 def attr_matches(self, text):
696 696 """Compute matches when text contains a dot.
697 697
698 698 Assuming the text is of the form NAME.NAME....[NAME], and is
699 699 evaluatable in self.namespace or self.global_namespace, it will be
700 700 evaluated and its attributes (as revealed by dir()) are used as
701 701 possible completions. (For class instances, class members are
702 702 also considered.)
703 703
704 704 WARNING: this can still invoke arbitrary C code, if an object
705 705 with a __getattr__ hook is evaluated.
706 706
707 707 """
708 708
709 709 # Another option, seems to work great. Catches things like ''.<tab>
710 710 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
711 711
712 712 if m:
713 713 expr, attr = m.group(1, 3)
714 714 elif self.greedy:
715 715 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
716 716 if not m2:
717 717 return []
718 718 expr, attr = m2.group(1,2)
719 719 else:
720 720 return []
721 721
722 722 try:
723 723 obj = eval(expr, self.namespace)
724 724 except:
725 725 try:
726 726 obj = eval(expr, self.global_namespace)
727 727 except:
728 728 return []
729 729
730 730 if self.limit_to__all__ and hasattr(obj, '__all__'):
731 731 words = get__all__entries(obj)
732 732 else:
733 733 words = dir2(obj)
734 734
735 735 try:
736 736 words = generics.complete_object(obj, words)
737 737 except TryNext:
738 738 pass
739 739 except AssertionError:
740 740 raise
741 741 except Exception:
742 742 # Silence errors from completion function
743 743 #raise # dbg
744 744 pass
745 745 # Build match list to return
746 746 n = len(attr)
747 747 return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr ]
748 748
749 749
750 750 def get__all__entries(obj):
751 751 """returns the strings in the __all__ attribute"""
752 752 try:
753 753 words = getattr(obj, '__all__')
754 754 except:
755 755 return []
756 756
757 757 return [w for w in words if isinstance(w, str)]
758 758
759 759
760 760 def match_dict_keys(keys: List[Union[str, bytes, Tuple[Union[str, bytes]]]], prefix: str, delims: str,
761 761 extra_prefix: Optional[Tuple[str, bytes]]=None) -> Tuple[str, int, List[str]]:
762 762 """Used by dict_key_matches, matching the prefix to a list of keys
763 763
764 764 Parameters
765 765 ----------
766 766 keys
767 767 list of keys in dictionary currently being completed.
768 768 prefix
769 769 Part of the text already typed by the user. E.g. `mydict[b'fo`
770 770 delims
771 771 String of delimiters to consider when finding the current key.
772 772 extra_prefix : optional
773 773 Part of the text already typed in multi-key index cases. E.g. for
774 774 `mydict['foo', "bar", 'b`, this would be `('foo', 'bar')`.
775 775
776 776 Returns
777 777 -------
778 778 A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
779 779 ``quote`` being the quote that needs to be used to close the current string,
780 780 ``token_start`` the position where the replacement should start occurring, and
781 781 ``matched`` a list of replacements/completions.
782 782
783 783 """
784 784 prefix_tuple = extra_prefix if extra_prefix else ()
785 785 Nprefix = len(prefix_tuple)
786 786 def filter_prefix_tuple(key):
787 787 # Reject too short keys
788 788 if len(key) <= Nprefix:
789 789 return False
790 790 # Reject keys with non str/bytes in it
791 791 for k in key:
792 792 if not isinstance(k, (str, bytes)):
793 793 return False
794 794 # Reject keys that do not match the prefix
795 795 for k, pt in zip(key, prefix_tuple):
796 796 if k != pt:
797 797 return False
798 798 # All checks passed!
799 799 return True
800 800
801 801 filtered_keys:List[Union[str,bytes]] = []
802 802 def _add_to_filtered_keys(key):
803 803 if isinstance(key, (str, bytes)):
804 804 filtered_keys.append(key)
805 805
806 806 for k in keys:
807 807 if isinstance(k, tuple):
808 808 if filter_prefix_tuple(k):
809 809 _add_to_filtered_keys(k[Nprefix])
810 810 else:
811 811 _add_to_filtered_keys(k)
812 812
813 813 if not prefix:
814 814 return '', 0, [repr(k) for k in filtered_keys]
815 815 quote_match = re.search('["\']', prefix)
816 816 assert quote_match is not None # silence mypy
817 817 quote = quote_match.group()
818 818 try:
819 819 prefix_str = eval(prefix + quote, {})
820 820 except Exception:
821 821 return '', 0, []
822 822
823 823 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
824 824 token_match = re.search(pattern, prefix, re.UNICODE)
825 825 assert token_match is not None # silence mypy
826 826 token_start = token_match.start()
827 827 token_prefix = token_match.group()
828 828
829 829 matched:List[str] = []
830 830 for key in filtered_keys:
831 831 try:
832 832 if not key.startswith(prefix_str):
833 833 continue
834 834 except (AttributeError, TypeError, UnicodeError):
835 835 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
836 836 continue
837 837
838 838 # reformat remainder of key to begin with prefix
839 839 rem = key[len(prefix_str):]
840 840 # force repr wrapped in '
841 841 rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
842 842 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
843 843 if quote == '"':
844 844 # The entered prefix is quoted with ",
845 845 # but the match is quoted with '.
846 846 # A contained " hence needs escaping for comparison:
847 847 rem_repr = rem_repr.replace('"', '\\"')
848 848
849 849 # then reinsert prefix from start of token
850 850 matched.append('%s%s' % (token_prefix, rem_repr))
851 851 return quote, token_start, matched
852 852
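# Editor's illustration of match_dict_keys() (not part of the original file):
# with no prefix every key is offered as its repr; with a partially quoted
# prefix, the opening quote and the matching tails are returned.
#
#     >>> match_dict_keys(['abc', 'abd'], '', delims=' ')
#     ('', 0, ["'abc'", "'abd'"])
#     >>> match_dict_keys(['abc', 'abd'], "'a", delims=' ')
#     ("'", 0, ["'abc", "'abd"])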
853 853
854 854 def cursor_to_position(text:str, line:int, column:int)->int:
855 855 """
856 856 Convert the (line,column) position of the cursor in text to an offset in a
857 857 string.
858 858
859 859 Parameters
860 860 ----------
861 861 text : str
862 862 The text in which to calculate the cursor offset
863 863 line : int
864 864 Line of the cursor; 0-indexed
865 865 column : int
866 866 Column of the cursor; 0-indexed
867 867
868 868 Returns
869 869 -------
870 870 Position of the cursor in ``text``, 0-indexed.
871 871
872 872 See Also
873 873 --------
874 874 position_to_cursor : reciprocal of this function
875 875
876 876 """
877 877 lines = text.split('\n')
878 878 assert line <= len(lines), '{} <= {}'.format(str(line), str(len(lines)))
879 879
880 880 return sum(len(l) + 1 for l in lines[:line]) + column
881 881
882 882 def position_to_cursor(text:str, offset:int)->Tuple[int, int]:
883 883 """
884 884 Convert the position of the cursor in text (0-indexed) to a line
885 885 number (0-indexed) and a column number (0-indexed) pair.
886 886
887 887 Position should be a valid position in ``text``.
888 888
889 889 Parameters
890 890 ----------
891 891 text : str
892 892 The text in which to calculate the cursor offset
893 893 offset : int
894 894 Position of the cursor in ``text``, 0-indexed.
895 895
896 896 Returns
897 897 -------
898 898 (line, column) : (int, int)
899 899 Line of the cursor; 0-indexed, column of the cursor 0-indexed
900 900
901 901 See Also
902 902 --------
903 903 cursor_to_position : reciprocal of this function
904 904
905 905 """
906 906
907 907 assert 0 <= offset <= len(text) , "0 <= %s <= %s" % (offset , len(text))
908 908
909 909 before = text[:offset]
910 910 blines = before.split('\n') # ! splitlines trims trailing \n
911 911 line = before.count('\n')
912 912 col = len(blines[-1])
913 913 return line, col
914 914
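# Editor's illustration of the two helpers above round-tripping on a small
# buffer (0-indexed lines and columns):
#
#     >>> text = 'ab\ncd'
#     >>> cursor_to_position(text, 1, 1)
#     4
#     >>> position_to_cursor(text, 4)
#     (1, 1)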
915 915
916 916 def _safe_isinstance(obj, module, class_name):
917 917 """Checks if obj is an instance of module.class_name if loaded
918 918 """
919 919 return (module in sys.modules and
920 920 isinstance(obj, getattr(import_module(module), class_name)))
921 921
922 922 def back_unicode_name_matches(text:str) -> Tuple[str, Sequence[str]]:
923 923 """Match Unicode characters back to Unicode name
924 924
925 925 This does ``☃`` -> ``\\snowman``
926 926
927 927 Note that snowman is not a valid python3 combining character but will be expanded.
928 928 Though it will not be recombined back to the snowman character by the completion machinery.
929 929 
930 930 This will also not back-complete standard sequences like \\n, \\b ...
931 931
932 932 Returns
933 933 -------
934 934
935 935 Return a tuple with two elements:
936 936
937 937 - The Unicode character that was matched (preceded with a backslash), or
938 938 empty string,
939 939 - a sequence (of 1), the name of the matched Unicode character, preceded by
940 940 a backslash, or empty if no match.
941 941
942 942 """
943 943 if len(text)<2:
944 944 return '', ()
945 945 maybe_slash = text[-2]
946 946 if maybe_slash != '\\':
947 947 return '', ()
948 948
949 949 char = text[-1]
950 950 # no expand on quote for completion in strings.
951 951 # nor backcomplete standard ascii keys
952 952 if char in string.ascii_letters or char in ('"',"'"):
953 953 return '', ()
954 954 try :
955 955 unic = unicodedata.name(char)
956 956 return '\\'+char,('\\'+unic,)
957 957 except KeyError:
958 958 pass
959 959 return '', ()
960 960
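# Editor's illustration of back_unicode_name_matches(): typing a backslash in
# front of an already-typed character offers its unicode name.
#
#     >>> back_unicode_name_matches('\\☃')
#     ('\\☃', ('\\SNOWMAN',))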
961 961 def back_latex_name_matches(text:str) -> Tuple[str, Sequence[str]] :
962 962 """Match latex characters back to unicode name
963 963
964 964 This does ``\\ℵ`` -> ``\\aleph``
965 965
966 966 """
967 967 if len(text)<2:
968 968 return '', ()
969 969 maybe_slash = text[-2]
970 970 if maybe_slash != '\\':
971 971 return '', ()
972 972
973 973
974 974 char = text[-1]
975 975 # no expand on quote for completion in strings.
976 976 # nor backcomplete standard ascii keys
977 977 if char in string.ascii_letters or char in ('"',"'"):
978 978 return '', ()
979 979 try :
980 980 latex = reverse_latex_symbol[char]
981 981 # '\\' replace the \ as well
982 982 return '\\'+char,[latex]
983 983 except KeyError:
984 984 pass
985 985 return '', ()
986 986
987 987
988 988 def _formatparamchildren(parameter) -> str:
989 989 """
990 990 Get parameter name and value from Jedi Private API
991 991
992 992 Jedi does not expose a simple way to get `param=value` from its API.
993 993
994 994 Parameters
995 995 ----------
996 996 parameter
997 997 Jedi's function `Param`
998 998
999 999 Returns
1000 1000 -------
1001 1001 A string like 'a', 'b=1', '*args', '**kwargs'
1002 1002
1003 1003 """
1004 1004 description = parameter.description
1005 1005 if not description.startswith('param '):
1006 1006 raise ValueError('Jedi function parameter description has changed format. '
1007 1007 'Expected "param ...", found %r.' % description)
1008 1008 return description[6:]
1009 1009
1010 1010 def _make_signature(completion)-> str:
1011 1011 """
1012 1012 Make the signature from a jedi completion
1013 1013
1014 1014 Parameters
1015 1015 ----------
1016 1016 completion : jedi.Completion
1017 1017 object does not complete a function type
1018 1018
1019 1019 Returns
1020 1020 -------
1021 1021 a string consisting of the function signature, with the parentheses but
1022 1022 without the function name. Example:
1023 1023 `(a, *args, b=1, **kwargs)`
1024 1024
1025 1025 """
1026 1026
1027 1027 # it looks like this might work on jedi 0.17
1028 1028 if hasattr(completion, 'get_signatures'):
1029 1029 signatures = completion.get_signatures()
1030 1030 if not signatures:
1031 1031 return '(?)'
1032 1032
1033 1033 c0 = completion.get_signatures()[0]
1034 1034 return '('+c0.to_string().split('(', maxsplit=1)[1]
1035 1035
1036 1036 return '(%s)'% ', '.join([f for f in (_formatparamchildren(p) for signature in completion.get_signatures()
1037 1037 for p in signature.defined_names()) if f])
1038 1038
1039 1039
1040 1040 class _CompleteResult(NamedTuple):
1041 1041 matched_text : str
1042 1042 matches: Sequence[str]
1043 1043 matches_origin: Sequence[str]
1044 1044 jedi_matches: Any
1045 1045
1046 1046
1047 1047 class IPCompleter(Completer):
1048 1048 """Extension of the completer class with IPython-specific features"""
1049 1049
1050 1050 __dict_key_regexps: Optional[Dict[bool,Pattern]] = None
1051 1051
1052 1052 @observe('greedy')
1053 1053 def _greedy_changed(self, change):
1054 1054 """update the splitter and readline delims when greedy is changed"""
1055 1055 if change['new']:
1056 1056 self.splitter.delims = GREEDY_DELIMS
1057 1057 else:
1058 1058 self.splitter.delims = DELIMS
1059 1059
1060 1060 dict_keys_only = Bool(False,
1061 1061 help="""Whether to show dict key matches only""")
1062 1062
1063 1063 merge_completions = Bool(True,
1064 1064 help="""Whether to merge completion results into a single list
1065 1065
1066 1066 If False, only the completion results from the first non-empty
1067 1067 completer will be returned.
1068 1068 """
1069 1069 ).tag(config=True)
1070 1070 omit__names = Enum((0,1,2), default_value=2,
1071 1071 help="""Instruct the completer to omit private method names
1072 1072
1073 1073 Specifically, when completing on ``object.<tab>``.
1074 1074
1075 1075 When 2 [default]: all names that start with '_' will be excluded.
1076 1076
1077 1077 When 1: all 'magic' names (``__foo__``) will be excluded.
1078 1078
1079 1079 When 0: nothing will be excluded.
1080 1080 """
1081 1081 ).tag(config=True)
1082 1082 limit_to__all__ = Bool(False,
1083 1083 help="""
1084 1084 DEPRECATED as of version 5.0.
1085 1085
1086 1086 Instruct the completer to use __all__ for the completion
1087 1087
1088 1088 Specifically, when completing on ``object.<tab>``.
1089 1089
1090 1090 When True: only those names in obj.__all__ will be included.
1091 1091
1092 1092 When False [default]: the __all__ attribute is ignored
1093 1093 """,
1094 1094 ).tag(config=True)
1095 1095
1096 1096 profile_completions = Bool(
1097 1097 default_value=False,
1098 1098 help="If True, emit profiling data for completion subsystem using cProfile."
1099 1099 ).tag(config=True)
1100 1100
1101 1101 profiler_output_dir = Unicode(
1102 1102 default_value=".completion_profiles",
1103 1103 help="Template for path at which to output profile data for completions."
1104 1104 ).tag(config=True)
1105 1105
1106 1106 @observe('limit_to__all__')
1107 1107 def _limit_to_all_changed(self, change):
1108 1108 warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
1109 1109 'value has been deprecated since IPython 5.0, will be made to have '
1110 1110 'no effect and then removed in a future version of IPython.',
1111 1111 UserWarning)
1112 1112
1113 1113 def __init__(self, shell=None, namespace=None, global_namespace=None,
1114 1114 use_readline=_deprecation_readline_sentinel, config=None, **kwargs):
1115 1115 """IPCompleter() -> completer
1116 1116
1117 1117 Return a completer object.
1118 1118
1119 1119 Parameters
1120 1120 ----------
1121 1121 shell
1122 1122 a pointer to the ipython shell itself. This is needed
1123 1123 because this completer knows about magic functions, and those can
1124 1124 only be accessed via the ipython instance.
1125 1125 namespace : dict, optional
1126 1126 an optional dict where completions are performed.
1127 1127 global_namespace : dict, optional
1128 1128 secondary optional dict for completions, to
1129 1129 handle cases (such as IPython embedded inside functions) where
1130 1130 both Python scopes are visible.
1131 1131 use_readline : bool, optional
1132 1132 DEPRECATED, ignored since IPython 6.0, will have no effect
1133 1133 """
1134 1134
1135 1135 self.magic_escape = ESC_MAGIC
1136 1136 self.splitter = CompletionSplitter()
1137 1137
1138 1138 if use_readline is not _deprecation_readline_sentinel:
1139 1139 warnings.warn('The `use_readline` parameter is deprecated and ignored since IPython 6.0.',
1140 1140 DeprecationWarning, stacklevel=2)
1141 1141
1142 1142 # _greedy_changed() depends on splitter and readline being defined:
1143 1143 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
1144 1144 config=config, **kwargs)
1145 1145
1146 1146 # List where completion matches will be stored
1147 1147 self.matches = []
1148 1148 self.shell = shell
1149 1149 # Regexp to split filenames with spaces in them
1150 1150 self.space_name_re = re.compile(r'([^\\] )')
1151 1151 # Hold a local ref. to glob.glob for speed
1152 1152 self.glob = glob.glob
1153 1153
1154 1154 # Determine if we are running on 'dumb' terminals, like (X)Emacs
1155 1155 # buffers, to avoid completion problems.
1156 1156 term = os.environ.get('TERM','xterm')
1157 1157 self.dumb_terminal = term in ['dumb','emacs']
1158 1158
1159 1159 # Special handling of backslashes needed in win32 platforms
1160 1160 if sys.platform == "win32":
1161 1161 self.clean_glob = self._clean_glob_win32
1162 1162 else:
1163 1163 self.clean_glob = self._clean_glob
1164 1164
1165 1165 #regexp to parse docstring for function signature
1166 1166 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1167 1167 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1168 1168 #use this if positional argument name is also needed
1169 1169 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
1170 1170
1171 1171 self.magic_arg_matchers = [
1172 1172 self.magic_config_matches,
1173 1173 self.magic_color_matches,
1174 1174 ]
1175 1175
1176 1176 # This is set externally by InteractiveShell
1177 1177 self.custom_completers = None
1178 1178
1179 1179 # This is a list of names of unicode characters that can be completed
1180 1180 # into their corresponding unicode value. The list is large, so we
1181 1181 # lazily initialize it on first use. Consuming code should access this
1182 1182 # attribute through the `@unicode_names` property.
1183 1183 self._unicode_names = None
1184 1184
1185 1185 @property
1186 1186 def matchers(self) -> List[Any]:
1187 1187 """All active matcher routines for completion"""
1188 1188 if self.dict_keys_only:
1189 1189 return [self.dict_key_matches]
1190 1190
1191 1191 if self.use_jedi:
1192 1192 return [
1193 1193 *self.custom_matchers,
1194 1194 self.file_matches,
1195 1195 self.magic_matches,
1196 1196 self.dict_key_matches,
1197 1197 ]
1198 1198 else:
1199 1199 return [
1200 1200 *self.custom_matchers,
1201 1201 self.python_matches,
1202 1202 self.file_matches,
1203 1203 self.magic_matches,
1204 1204 self.python_func_kw_matches,
1205 1205 self.dict_key_matches,
1206 1206 ]
1207 1207
1208 1208 def all_completions(self, text:str) -> List[str]:
1209 1209 """
1210 1210 Wrapper around the completion methods for the benefit of emacs.
1211 1211 """
1212 1212 prefix = text.rpartition('.')[0]
1213 1213 with provisionalcompleter():
1214 1214 return ['.'.join([prefix, c.text]) if prefix and self.use_jedi else c.text
1215 1215 for c in self.completions(text, len(text))]
1216 1216
1217 1217 return self.complete(text)[1]
1218 1218
1219 1219 def _clean_glob(self, text:str):
1220 1220 return self.glob("%s*" % text)
1221 1221
1222 1222 def _clean_glob_win32(self, text:str):
1223 1223 return [f.replace("\\","/")
1224 1224 for f in self.glob("%s*" % text)]
1225 1225
1226 1226 def file_matches(self, text:str)->List[str]:
1227 1227 """Match filenames, expanding ~USER type strings.
1228 1228
1229 1229 Most of the seemingly convoluted logic in this completer is an
1230 1230 attempt to handle filenames with spaces in them. And yet it's not
1231 1231 quite perfect, because Python's readline doesn't expose all of the
1232 1232 GNU readline details needed for this to be done correctly.
1233 1233
1234 1234 For a filename with a space in it, the printed completions will be
1235 1235 only the parts after what's already been typed (instead of the
1236 1236 full completions, as is normally done). I don't think with the
1237 1237 current (as of Python 2.3) Python readline it's possible to do
1238 1238 better."""
1239 1239
1240 1240 # chars that require escaping with backslash - i.e. chars
1241 1241 # that readline treats incorrectly as delimiters, but we
1242 1242 # don't want to treat as delimiters in filename matching
1243 1243 # when escaped with backslash
1244 1244 if text.startswith('!'):
1245 1245 text = text[1:]
1246 1246 text_prefix = u'!'
1247 1247 else:
1248 1248 text_prefix = u''
1249 1249
1250 1250 text_until_cursor = self.text_until_cursor
1251 1251 # track strings with open quotes
1252 1252 open_quotes = has_open_quotes(text_until_cursor)
1253 1253
1254 1254 if '(' in text_until_cursor or '[' in text_until_cursor:
1255 1255 lsplit = text
1256 1256 else:
1257 1257 try:
1258 1258 # arg_split ~ shlex.split, but with unicode bugs fixed by us
1259 1259 lsplit = arg_split(text_until_cursor)[-1]
1260 1260 except ValueError:
1261 1261 # typically an unmatched ", or backslash without escaped char.
1262 1262 if open_quotes:
1263 1263 lsplit = text_until_cursor.split(open_quotes)[-1]
1264 1264 else:
1265 1265 return []
1266 1266 except IndexError:
1267 1267 # tab pressed on empty line
1268 1268 lsplit = ""
1269 1269
1270 1270 if not open_quotes and lsplit != protect_filename(lsplit):
1271 1271 # if protectables are found, do matching on the whole escaped name
1272 1272 has_protectables = True
1273 1273 text0,text = text,lsplit
1274 1274 else:
1275 1275 has_protectables = False
1276 1276 text = os.path.expanduser(text)
1277 1277
1278 1278 if text == "":
1279 1279 return [text_prefix + protect_filename(f) for f in self.glob("*")]
1280 1280
1281 1281 # Compute the matches from the filesystem
1282 1282 if sys.platform == 'win32':
1283 1283 m0 = self.clean_glob(text)
1284 1284 else:
1285 1285 m0 = self.clean_glob(text.replace('\\', ''))
1286 1286
1287 1287 if has_protectables:
1288 1288 # If we had protectables, we need to revert our changes to the
1289 1289 # beginning of filename so that we don't double-write the part
1290 1290 # of the filename we have so far
1291 1291 len_lsplit = len(lsplit)
1292 1292 matches = [text_prefix + text0 +
1293 1293 protect_filename(f[len_lsplit:]) for f in m0]
1294 1294 else:
1295 1295 if open_quotes:
1296 1296 # if we have a string with an open quote, we don't need to
1297 1297 # protect the names beyond the quote (and we _shouldn't_, as
1298 1298 # it would cause bugs when the filesystem call is made).
1299 1299 matches = m0 if sys.platform == "win32" else\
1300 1300 [protect_filename(f, open_quotes) for f in m0]
1301 1301 else:
1302 1302 matches = [text_prefix +
1303 1303 protect_filename(f) for f in m0]
1304 1304
1305 1305 # Mark directories in input list by appending '/' to their names.
1306 1306 return [x+'/' if os.path.isdir(x) else x for x in matches]
1307 1307
1308 1308 def magic_matches(self, text:str):
1309 1309 """Match magics"""
1310 1310 # Get all shell magics now rather than statically, so magics loaded at
1311 1311 # runtime show up too.
1312 1312 lsm = self.shell.magics_manager.lsmagic()
1313 1313 line_magics = lsm['line']
1314 1314 cell_magics = lsm['cell']
1315 1315 pre = self.magic_escape
1316 1316 pre2 = pre+pre
1317 1317
1318 1318 explicit_magic = text.startswith(pre)
1319 1319
1320 1320 # Completion logic:
1321 1321 # - user gives %%: only do cell magics
1322 1322 # - user gives %: do both line and cell magics
1323 1323 # - no prefix: do both
1324 1324 # In other words, line magics are skipped if the user gives %% explicitly
1325 1325 #
1326 1326 # We also exclude magics that match any currently visible names:
1327 1327 # https://github.com/ipython/ipython/issues/4877, unless the user has
1328 1328 # typed a %:
1329 1329 # https://github.com/ipython/ipython/issues/10754
1330 1330 bare_text = text.lstrip(pre)
1331 1331 global_matches = self.global_matches(bare_text)
1332 1332 if not explicit_magic:
1333 1333 def matches(magic):
1334 1334 """
1335 1335 Filter magics, in particular remove magics that match
1336 1336 a name present in global namespace.
1337 1337 """
1338 1338 return ( magic.startswith(bare_text) and
1339 1339 magic not in global_matches )
1340 1340 else:
1341 1341 def matches(magic):
1342 1342 return magic.startswith(bare_text)
1343 1343
1344 1344 comp = [ pre2+m for m in cell_magics if matches(m)]
1345 1345 if not text.startswith(pre2):
1346 1346 comp += [ pre+m for m in line_magics if matches(m)]
1347 1347
1348 1348 return comp
1349 1349
1350 1350 def magic_config_matches(self, text:str) -> List[str]:
1351 1351 """ Match class names and attributes for %config magic """
1352 1352 texts = text.strip().split()
1353 1353
1354 1354 if len(texts) > 0 and (texts[0] == 'config' or texts[0] == '%config'):
1355 1355 # get all configuration classes
1356 1356 classes = sorted(set([ c for c in self.shell.configurables
1357 1357 if c.__class__.class_traits(config=True)
1358 1358 ]), key=lambda x: x.__class__.__name__)
1359 1359 classnames = [ c.__class__.__name__ for c in classes ]
1360 1360
1361 1361 # return all classnames if config or %config is given
1362 1362 if len(texts) == 1:
1363 1363 return classnames
1364 1364
1365 1365 # match classname
1366 1366 classname_texts = texts[1].split('.')
1367 1367 classname = classname_texts[0]
1368 1368 classname_matches = [ c for c in classnames
1369 1369 if c.startswith(classname) ]
1370 1370
1371 1371 # return matched classes or the matched class with attributes
1372 1372 if texts[1].find('.') < 0:
1373 1373 return classname_matches
1374 1374 elif len(classname_matches) == 1 and \
1375 1375 classname_matches[0] == classname:
1376 1376 cls = classes[classnames.index(classname)].__class__
1377 1377 help = cls.class_get_help()
1378 1378 # strip leading '--' from cl-args:
1379 1379 help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
1380 1380 return [ attr.split('=')[0]
1381 1381 for attr in help.strip().splitlines()
1382 1382 if attr.startswith(texts[1]) ]
1383 1383 return []
1384 1384
1385 1385 def magic_color_matches(self, text:str) -> List[str] :
1386 1386 """ Match color schemes for %colors magic"""
1387 1387 texts = text.split()
1388 1388 if text.endswith(' '):
1389 1389 # .split() strips off the trailing whitespace. Add '' back
1390 1390 # so that: '%colors ' -> ['%colors', '']
1391 1391 texts.append('')
1392 1392
1393 1393 if len(texts) == 2 and (texts[0] == 'colors' or texts[0] == '%colors'):
1394 1394 prefix = texts[1]
1395 1395 return [ color for color in InspectColors.keys()
1396 1396 if color.startswith(prefix) ]
1397 1397 return []
1398 1398
1399 1399 def _jedi_matches(self, cursor_column:int, cursor_line:int, text:str) -> Iterable[Any]:
1400 1400 """
1401 1401 Return a list of :any:`jedi.api.Completions` object from a ``text`` and
1402 1402 cursor position.
1403 1403
1404 1404 Parameters
1405 1405 ----------
1406 1406 cursor_column : int
1407 1407 column position of the cursor in ``text``, 0-indexed.
1408 1408 cursor_line : int
1409 1409 line position of the cursor in ``text``, 0-indexed
1410 1410 text : str
1411 1411 text to complete
1412 1412
1413 1413 Notes
1414 1414 -----
1415 1415 If ``IPCompleter.debug`` is ``True``, this may return a :any:`_FakeJediCompletion`
1416 1416 object containing a string with the Jedi debug information attached.
1417 1417 """
1418 1418 namespaces = [self.namespace]
1419 1419 if self.global_namespace is not None:
1420 1420 namespaces.append(self.global_namespace)
1421 1421
1422 1422 completion_filter = lambda x:x
1423 1423 offset = cursor_to_position(text, cursor_line, cursor_column)
1424 1424 # filter output if we are completing for object members
1425 1425 if offset:
1426 1426 pre = text[offset-1]
1427 1427 if pre == '.':
1428 1428 if self.omit__names == 2:
1429 1429 completion_filter = lambda c:not c.name.startswith('_')
1430 1430 elif self.omit__names == 1:
1431 1431 completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
1432 1432 elif self.omit__names == 0:
1433 1433 completion_filter = lambda x:x
1434 1434 else:
1435 1435 raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
1436 1436
1437 1437 interpreter = jedi.Interpreter(text[:offset], namespaces)
1438 1438 try_jedi = True
1439 1439
1440 1440 try:
1441 1441 # find the first token in the current tree -- if it is a ' or " then we are in a string
1442 1442 completing_string = False
1443 1443 try:
1444 1444 first_child = next(c for c in interpreter._get_module().tree_node.children if hasattr(c, 'value'))
1445 1445 except StopIteration:
1446 1446 pass
1447 1447 else:
1448 1448 # note the value may be ', ", or it may also be ''' or """, or
1449 1449 # in some cases, """what/you/typed..., but all of these are
1450 1450 # strings.
1451 1451 completing_string = len(first_child.value) > 0 and first_child.value[0] in {"'", '"'}
1452 1452
1453 1453 # if we are in a string jedi is likely not the right candidate for
1454 1454 # now. Skip it.
1455 1455 try_jedi = not completing_string
1456 1456 except Exception as e:
1457 1457 # many things can go wrong; we are using a private API, just don't crash.
1458 1458 if self.debug:
1459 1459 print("Error detecting if completing a non-finished string :", e, '|')
1460 1460
1461 1461 if not try_jedi:
1462 1462 return []
1463 1463 try:
1464 1464 return filter(completion_filter, interpreter.complete(column=cursor_column, line=cursor_line + 1))
1465 1465 except Exception as e:
1466 1466 if self.debug:
1467 1467 return [_FakeJediCompletion('Oops Jedi has crashed, please report a bug with the following:\n"""\n%s\ns"""' % (e))]
1468 1468 else:
1469 1469 return []
1470 1470
1471 1471 def python_matches(self, text:str)->List[str]:
1472 1472 """Match attributes or global python names"""
1473 1473 if "." in text:
1474 1474 try:
1475 1475 matches = self.attr_matches(text)
1476 1476 if text.endswith('.') and self.omit__names:
1477 1477 if self.omit__names == 1:
1478 1478 # true if txt is _not_ a __ name, false otherwise:
1479 1479 no__name = (lambda txt:
1480 1480 re.match(r'.*\.__.*?__',txt) is None)
1481 1481 else:
1482 1482 # true if txt is _not_ a _ name, false otherwise:
1483 1483 no__name = (lambda txt:
1484 1484 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
1485 1485 matches = filter(no__name, matches)
1486 1486 except NameError:
1487 1487 # catches <undefined attributes>.<tab>
1488 1488 matches = []
1489 1489 else:
1490 1490 matches = self.global_matches(text)
1491 1491 return matches
1492 1492
1493 1493 def _default_arguments_from_docstring(self, doc):
1494 1494 """Parse the first line of docstring for call signature.
1495 1495
1496 1496 Docstring should be of the form 'min(iterable[, key=func])\n'.
1497 1497 It can also parse cython docstring of the form
1498 1498 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
1499 1499 """
1500 1500 if doc is None:
1501 1501 return []
1502 1502
1503 1503 # care only about the first line
1504 1504 line = doc.lstrip().splitlines()[0]
1505 1505
1506 1506 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1507 1507 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
1508 1508 sig = self.docstring_sig_re.search(line)
1509 1509 if sig is None:
1510 1510 return []
1511 1511 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
1512 1512 sig = sig.groups()[0].split(',')
1513 1513 ret = []
1514 1514 for s in sig:
1515 1515 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1516 1516 ret += self.docstring_kwd_re.findall(s)
1517 1517 return ret
1518 1518
1519 1519 def _default_arguments(self, obj):
1520 1520 """Return the list of default arguments of obj if it is callable,
1521 1521 or empty list otherwise."""
1522 1522 call_obj = obj
1523 1523 ret = []
1524 1524 if inspect.isbuiltin(obj):
1525 1525 pass
1526 1526 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
1527 1527 if inspect.isclass(obj):
1528 1528 #for cython embedsignature=True the constructor docstring
1529 1529 #belongs to the object itself not __init__
1530 1530 ret += self._default_arguments_from_docstring(
1531 1531 getattr(obj, '__doc__', ''))
1532 1532 # for classes, check for __init__,__new__
1533 1533 call_obj = (getattr(obj, '__init__', None) or
1534 1534 getattr(obj, '__new__', None))
1535 1535 # for all others, check if they are __call__able
1536 1536 elif hasattr(obj, '__call__'):
1537 1537 call_obj = obj.__call__
1538 1538 ret += self._default_arguments_from_docstring(
1539 1539 getattr(call_obj, '__doc__', ''))
1540 1540
1541 1541 _keeps = (inspect.Parameter.KEYWORD_ONLY,
1542 1542 inspect.Parameter.POSITIONAL_OR_KEYWORD)
1543 1543
1544 1544 try:
1545 1545 sig = inspect.signature(obj)
1546 1546 ret.extend(k for k, v in sig.parameters.items() if
1547 1547 v.kind in _keeps)
1548 1548 except ValueError:
1549 1549 pass
1550 1550
1551 1551 return list(set(ret))
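# A hedged sketch of what _default_arguments returns, assuming a hypothetical
# function that is not part of this module:
#
#     def f(a, b=1, *, c=2): ...
#
# _default_arguments(f) collects the POSITIONAL_OR_KEYWORD and KEYWORD_ONLY
# parameter names from inspect.signature, so it would return ['a', 'b', 'c']
# in some (set-derived) order.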
1552 1552
1553 1553 def python_func_kw_matches(self, text):
1554 1554 """Match named parameters (kwargs) of the last open function"""
1555 1555
1556 1556 if "." in text: # a parameter cannot be dotted
1557 1557 return []
1558 1558 try: regexp = self.__funcParamsRegex
1559 1559 except AttributeError:
1560 1560 regexp = self.__funcParamsRegex = re.compile(r'''
1561 1561 '.*?(?<!\\)' | # single quoted strings or
1562 1562 ".*?(?<!\\)" | # double quoted strings or
1563 1563 \w+ | # identifier
1564 1564 \S # other characters
1565 1565 ''', re.VERBOSE | re.DOTALL)
1566 1566 # 1. find the nearest identifier that comes before an unclosed
1567 1567 # parenthesis before the cursor
1568 1568 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
1569 1569 tokens = regexp.findall(self.text_until_cursor)
1570 1570 iterTokens = reversed(tokens); openPar = 0
1571 1571
1572 1572 for token in iterTokens:
1573 1573 if token == ')':
1574 1574 openPar -= 1
1575 1575 elif token == '(':
1576 1576 openPar += 1
1577 1577 if openPar > 0:
1578 1578 # found the last unclosed parenthesis
1579 1579 break
1580 1580 else:
1581 1581 return []
1582 1582 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
1583 1583 ids = []
1584 1584 isId = re.compile(r'\w+$').match
1585 1585
1586 1586 while True:
1587 1587 try:
1588 1588 ids.append(next(iterTokens))
1589 1589 if not isId(ids[-1]):
1590 1590 ids.pop(); break
1591 1591 if not next(iterTokens) == '.':
1592 1592 break
1593 1593 except StopIteration:
1594 1594 break
1595 1595
1596 1596 # Find all named arguments already assigned to, so as to avoid suggesting
1597 1597 # them again
1598 1598 usedNamedArgs = set()
1599 1599 par_level = -1
1600 1600 for token, next_token in zip(tokens, tokens[1:]):
1601 1601 if token == '(':
1602 1602 par_level += 1
1603 1603 elif token == ')':
1604 1604 par_level -= 1
1605 1605
1606 1606 if par_level != 0:
1607 1607 continue
1608 1608
1609 1609 if next_token != '=':
1610 1610 continue
1611 1611
1612 1612 usedNamedArgs.add(token)
1613 1613
1614 1614 argMatches = []
1615 1615 try:
1616 1616 callableObj = '.'.join(ids[::-1])
1617 1617 namedArgs = self._default_arguments(eval(callableObj,
1618 1618 self.namespace))
1619 1619
1620 1620 # Remove used named arguments from the list, no need to show twice
1621 1621 for namedArg in set(namedArgs) - usedNamedArgs:
1622 1622 if namedArg.startswith(text):
1623 1623 argMatches.append("%s=" %namedArg)
1624 1624 except:
1625 1625 pass
1626 1626
1627 1627 return argMatches
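# A hedged usage sketch, assuming a hypothetical function in the user namespace:
#
#     def plot(x, label=None): ...
#
# With text_until_cursor ending in "plot(x, lab" and text == "lab",
# python_func_kw_matches("lab") would suggest ["label="]; keyword arguments
# already assigned in the call are filtered out via usedNamedArgs.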
1628 1628
1629 1629 @staticmethod
1630 1630 def _get_keys(obj: Any) -> List[Any]:
1631 1631 # Objects can define their own completions by defining an
1632 1632 # _ipython_key_completions_() method.
1633 1633 method = get_real_method(obj, '_ipython_key_completions_')
1634 1634 if method is not None:
1635 1635 return method()
1636 1636
1637 1637 # Special case some common in-memory dict-like types
1638 1638 if isinstance(obj, dict) or\
1639 1639 _safe_isinstance(obj, 'pandas', 'DataFrame'):
1640 1640 try:
1641 1641 return list(obj.keys())
1642 1642 except Exception:
1643 1643 return []
1644 1644 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
1645 1645 _safe_isinstance(obj, 'numpy', 'void'):
1646 1646 return obj.dtype.names or []
1647 1647 return []
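# A minimal sketch of the key-completion protocol checked above, using a
# hypothetical class that is not part of IPython:
#
#     class Bag:
#         def __init__(self, data):
#             self._data = dict(data)
#         def __getitem__(self, key):
#             return self._data[key]
#         def _ipython_key_completions_(self):
#             return list(self._data)
#
# Instances of such a class get their keys offered when completing ``bag[``.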
1648 1648
1649 1649 def dict_key_matches(self, text:str) -> List[str]:
1650 1650 "Match string keys in a dictionary, after e.g. 'foo[' "
1651 1651
1652 1652
1653 1653 if self.__dict_key_regexps is not None:
1654 1654 regexps = self.__dict_key_regexps
1655 1655 else:
1656 1656 dict_key_re_fmt = r'''(?x)
1657 1657 ( # match dict-referring expression wrt greedy setting
1658 1658 %s
1659 1659 )
1660 1660 \[ # open bracket
1661 1661 \s* # and optional whitespace
1662 1662 # Capture any number of str-like objects (e.g. "a", "b", 'c')
1663 1663 ((?:[uUbB]? # string prefix (r not handled)
1664 1664 (?:
1665 1665 '(?:[^']|(?<!\\)\\')*'
1666 1666 |
1667 1667 "(?:[^"]|(?<!\\)\\")*"
1668 1668 )
1669 1669 \s*,\s*
1670 1670 )*)
1671 1671 ([uUbB]? # string prefix (r not handled)
1672 1672 (?: # unclosed string
1673 1673 '(?:[^']|(?<!\\)\\')*
1674 1674 |
1675 1675 "(?:[^"]|(?<!\\)\\")*
1676 1676 )
1677 1677 )?
1678 1678 $
1679 1679 '''
1680 1680 regexps = self.__dict_key_regexps = {
1681 1681 False: re.compile(dict_key_re_fmt % r'''
1682 1682 # identifiers separated by .
1683 1683 (?!\d)\w+
1684 1684 (?:\.(?!\d)\w+)*
1685 1685 '''),
1686 1686 True: re.compile(dict_key_re_fmt % '''
1687 1687 .+
1688 1688 ''')
1689 1689 }
1690 1690
1691 1691 match = regexps[self.greedy].search(self.text_until_cursor)
1692 1692
1693 1693 if match is None:
1694 1694 return []
1695 1695
1696 1696 expr, prefix0, prefix = match.groups()
1697 1697 try:
1698 1698 obj = eval(expr, self.namespace)
1699 1699 except Exception:
1700 1700 try:
1701 1701 obj = eval(expr, self.global_namespace)
1702 1702 except Exception:
1703 1703 return []
1704 1704
1705 1705 keys = self._get_keys(obj)
1706 1706 if not keys:
1707 1707 return keys
1708 1708
1709 1709 extra_prefix = eval(prefix0) if prefix0 != '' else None
1710 1710
1711 1711 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims, extra_prefix=extra_prefix)
1712 1712 if not matches:
1713 1713 return matches
1714 1714
1715 1715 # get the cursor position of
1716 1716 # - the text being completed
1717 1717 # - the start of the key text
1718 1718 # - the start of the completion
1719 1719 text_start = len(self.text_until_cursor) - len(text)
1720 1720 if prefix:
1721 1721 key_start = match.start(3)
1722 1722 completion_start = key_start + token_offset
1723 1723 else:
1724 1724 key_start = completion_start = match.end()
1725 1725
1726 1726 # grab the leading prefix, to make sure all completions start with `text`
1727 1727 if text_start > key_start:
1728 1728 leading = ''
1729 1729 else:
1730 1730 leading = text[text_start:completion_start]
1731 1731
1732 1732 # the index of the `[` character
1733 1733 bracket_idx = match.end(1)
1734 1734
1735 1735 # append closing quote and bracket as appropriate
1736 1736 # this is *not* appropriate if the opening quote or bracket is outside
1737 1737 # the text given to this method
1738 1738 suf = ''
1739 1739 continuation = self.line_buffer[len(self.text_until_cursor):]
1740 1740 if key_start > text_start and closing_quote:
1741 1741 # quotes were opened inside text, maybe close them
1742 1742 if continuation.startswith(closing_quote):
1743 1743 continuation = continuation[len(closing_quote):]
1744 1744 else:
1745 1745 suf += closing_quote
1746 1746 if bracket_idx > text_start:
1747 1747 # brackets were opened inside text, maybe close them
1748 1748 if not continuation.startswith(']'):
1749 1749 suf += ']'
1750 1750
1751 1751 return [leading + k + suf for k in matches]
1752 1752
1753 1753 @staticmethod
1754 1754 def unicode_name_matches(text:str) -> Tuple[str, List[str]] :
1755 1755 """Match Latex-like syntax for unicode characters base
1756 1756 on the name of the character.
1757 1757
1758 1758 This does ``\\GREEK SMALL LETTER ETA`` -> ``η``
1759 1759
1760 1760 Works only on valid python 3 identifiers, or on combining characters that
1761 1761 will combine to form a valid identifier.
1762 1762 """
1763 1763 slashpos = text.rfind('\\')
1764 1764 if slashpos > -1:
1765 1765 s = text[slashpos+1:]
1766 1766 try :
1767 1767 unic = unicodedata.lookup(s)
1768 1768 # allow combining chars
1769 1769 if ('a'+unic).isidentifier():
1770 1770 return '\\'+s,[unic]
1771 1771 except KeyError:
1772 1772 pass
1773 1773 return '', []
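# A hedged usage sketch mirroring the docstring above:
#
#     IPCompleter.unicode_name_matches('\\GREEK SMALL LETTER ALPHA')
#     # -> ('\\GREEK SMALL LETTER ALPHA', ['α'])
#
# Text without a backslash yields ('', []).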
1774 1774
1775 1775
1776 1776 def latex_matches(self, text:str) -> Tuple[str, Sequence[str]]:
1777 1777 """Match Latex syntax for unicode characters.
1778 1778
1779 1779 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``α``
1780 1780 """
1781 1781 slashpos = text.rfind('\\')
1782 1782 if slashpos > -1:
1783 1783 s = text[slashpos:]
1784 1784 if s in latex_symbols:
1785 1785 # Try to complete a full latex symbol to unicode
1786 1786 # \\alpha -> α
1787 1787 return s, [latex_symbols[s]]
1788 1788 else:
1789 1789 # If a user has partially typed a latex symbol, give them
1790 1790 # a full list of options \al -> [\aleph, \alpha]
1791 1791 matches = [k for k in latex_symbols if k.startswith(s)]
1792 1792 if matches:
1793 1793 return s, matches
1794 1794 return '', ()
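# A hedged usage sketch mirroring the docstring above:
#
#     completer.latex_matches('\\alpha')  # -> ('\\alpha', ['α'])
#     completer.latex_matches('\\al')     # -> ('\\al', ['\\aleph', '\\alpha', ...])
#
# The exact partial-match list depends on the latex_symbols table.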
1795 1795
1796 1796 def dispatch_custom_completer(self, text):
1797 1797 if not self.custom_completers:
1798 1798 return
1799 1799
1800 1800 line = self.line_buffer
1801 1801 if not line.strip():
1802 1802 return None
1803 1803
1804 1804 # Create a little structure to pass all the relevant information about
1805 1805 # the current completion to any custom completer.
1806 1806 event = SimpleNamespace()
1807 1807 event.line = line
1808 1808 event.symbol = text
1809 1809 cmd = line.split(None,1)[0]
1810 1810 event.command = cmd
1811 1811 event.text_until_cursor = self.text_until_cursor
1812 1812
1813 1813 # for foo etc, try also to find completer for %foo
1814 1814 if not cmd.startswith(self.magic_escape):
1815 1815 try_magic = self.custom_completers.s_matches(
1816 1816 self.magic_escape + cmd)
1817 1817 else:
1818 1818 try_magic = []
1819 1819
1820 1820 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1821 1821 try_magic,
1822 1822 self.custom_completers.flat_matches(self.text_until_cursor)):
1823 1823 try:
1824 1824 res = c(event)
1825 1825 if res:
1826 1826 # first, try case sensitive match
1827 1827 withcase = [r for r in res if r.startswith(text)]
1828 1828 if withcase:
1829 1829 return withcase
1830 1830 # if none, then case insensitive ones are ok too
1831 1831 text_low = text.lower()
1832 1832 return [r for r in res if r.lower().startswith(text_low)]
1833 1833 except TryNext:
1834 1834 pass
1835 1835 except KeyboardInterrupt:
1836 1836 """
1837 1837 If a custom completer takes too long,
1838 1838 let the keyboard interrupt abort and return nothing.
1839 1839 """
1840 1840 break
1841 1841
1842 1842 return None
1843 1843
1844 1844 def completions(self, text: str, offset: int)->Iterator[Completion]:
1845 1845 """
1846 1846 Returns an iterator over the possible completions
1847 1847
1848 1848 .. warning::
1849 1849
1850 1850 Unstable
1851 1851
1852 1852 This function is unstable, API may change without warning.
1853 1853 It will also raise unless used in the proper context manager.
1854 1854
1855 1855 Parameters
1856 1856 ----------
1857 1857 text : str
1858 1858 Full text of the current input, multi line string.
1859 1859 offset : int
1860 1860 Integer representing the position of the cursor in ``text``. Offset
1861 1861 is 0-based.
1862 1862
1863 1863 Yields
1864 1864 ------
1865 1865 Completion
1866 1866
1867 1867 Notes
1868 1868 -----
1869 1869 The cursor on a text can either be seen as being "in between"
1870 1870 characters or "on" a character, depending on the interface visible to
1871 1871 the user. For consistency, the cursor being "in between" characters X
1872 1872 and Y is equivalent to the cursor being "on" character Y, that is to say
1873 1873 the character the cursor is on is considered as being after the cursor.
1874 1874
1875 1875 Combining characters may span more than one position in the
1876 1876 text.
1877 1877
1878 1878 .. note::
1879 1879
1880 1880 If ``IPCompleter.debug`` is :any:`True`, this will yield a ``--jedi/ipython--``
1881 1881 fake Completion token to distinguish completion returned by Jedi
1882 1882 and usual IPython completion.
1883 1883
1884 1884 .. note::
1885 1885
1886 1886 Completions are not completely deduplicated yet. If identical
1887 1887 completions are coming from different sources this function does not
1888 1888 ensure that each completion object will only be present once.
1889 1889 """
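# A hedged usage sketch of this provisional API; ``ip`` is a hypothetical
# IPCompleter instance and, as stated above, the call must happen inside
# the provisionalcompleter() context manager:
#
#     with provisionalcompleter():
#         for comp in ip.completions('foo.ba', 6):
#             print(comp.text, comp.start, comp.end)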
1890 1890 warnings.warn("_complete is a provisional API (as of IPython 6.0). "
1891 1891 "It may change without warnings. "
1892 1892 "Use in corresponding context manager.",
1893 1893 category=ProvisionalCompleterWarning, stacklevel=2)
1894 1894
1895 1895 seen = set()
1896 1896 profiler:Optional[cProfile.Profile]
1897 1897 try:
1898 1898 if self.profile_completions:
1899 1899 import cProfile
1900 1900 profiler = cProfile.Profile()
1901 1901 profiler.enable()
1902 1902 else:
1903 1903 profiler = None
1904 1904
1905 1905 for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
1906 1906 if c and (c in seen):
1907 1907 continue
1908 1908 yield c
1909 1909 seen.add(c)
1910 1910 except KeyboardInterrupt:
1911 1911 """if completions take too long and users send keyboard interrupt,
1912 1912 do not crash and return ASAP. """
1913 1913 pass
1914 1914 finally:
1915 1915 if profiler is not None:
1916 1916 profiler.disable()
1917 1917 ensure_dir_exists(self.profiler_output_dir)
1918 1918 output_path = os.path.join(self.profiler_output_dir, str(uuid.uuid4()))
1919 1919 print("Writing profiler output to", output_path)
1920 1920 profiler.dump_stats(output_path)
1921 1921
1922 1922 def _completions(self, full_text: str, offset: int, *, _timeout) -> Iterator[Completion]:
1923 1923 """
1924 1924 Core completion module. Same signature as :any:`completions`, with the
1925 1925 extra ``_timeout`` parameter (in seconds).
1926 1926
1927 1927 Computing jedi's completion ``.type`` can be quite expensive (it is a
1928 1928 lazy property) and can require some warm-up, more warm-up than just
1929 1929 computing the ``name`` of a completion. The warm-up can be:
1930 1930
1931 1931 - Long warm-up the first time a module is encountered after
1932 1932 install/update: actually build parse/inference tree.
1933 1933
1934 1934 - first time the module is encountered in a session: load tree from
1935 1935 disk.
1936 1936
1937 1937 We don't want to block completions for tens of seconds, so we give the
1938 1938 completer a "budget" of ``_timeout`` seconds per invocation to compute
1939 1939 completion types; the completions that have not yet been computed will
1940 1940 be marked as "unknown" and will have a chance to be computed next round
1941 1941 as things get cached.
1942 1942
1943 1943 Keep in mind that Jedi is not the only thing treating the completion, so
1944 1944 keep the timeout short-ish; if we take more than 0.3 seconds we still
1945 1945 have lots of processing to do.
1946 1946
1947 1947 """
1948 1948 deadline = time.monotonic() + _timeout
1949 1949
1950 1950
1951 1951 before = full_text[:offset]
1952 1952 cursor_line, cursor_column = position_to_cursor(full_text, offset)
1953 1953
1954 1954 matched_text, matches, matches_origin, jedi_matches = self._complete(
1955 1955 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column)
1956 1956
1957 1957 iter_jm = iter(jedi_matches)
1958 1958 if _timeout:
1959 1959 for jm in iter_jm:
1960 1960 try:
1961 1961 type_ = jm.type
1962 1962 except Exception:
1963 1963 if self.debug:
1964 1964 print("Error in Jedi getting type of ", jm)
1965 1965 type_ = None
1966 1966 delta = len(jm.name_with_symbols) - len(jm.complete)
1967 1967 if type_ == 'function':
1968 1968 signature = _make_signature(jm)
1969 1969 else:
1970 1970 signature = ''
1971 1971 yield Completion(start=offset - delta,
1972 1972 end=offset,
1973 1973 text=jm.name_with_symbols,
1974 1974 type=type_,
1975 1975 signature=signature,
1976 1976 _origin='jedi')
1977 1977
1978 1978 if time.monotonic() > deadline:
1979 1979 break
1980 1980
1981 1981 for jm in iter_jm:
1982 1982 delta = len(jm.name_with_symbols) - len(jm.complete)
1983 1983 yield Completion(start=offset - delta,
1984 1984 end=offset,
1985 1985 text=jm.name_with_symbols,
1986 1986 type='<unknown>', # don't compute type for speed
1987 1987 _origin='jedi',
1988 1988 signature='')
1989 1989
1990 1990
1991 1991 start_offset = before.rfind(matched_text)
1992 1992
1993 1993 # TODO:
1994 1994 # Suppress this, right now just for debug.
1995 1995 if jedi_matches and matches and self.debug:
1996 1996 yield Completion(start=start_offset, end=offset, text='--jedi/ipython--',
1997 1997 _origin='debug', type='none', signature='')
1998 1998
1999 1999 # I'm unsure if this is always true, so let's assert and see if it
2000 2000 # crashes
2001 2001 assert before.endswith(matched_text)
2002 2002 for m, t in zip(matches, matches_origin):
2003 2003 yield Completion(start=start_offset, end=offset, text=m, _origin=t, signature='', type='<unknown>')
2004 2004
2005 2005
2006 2006 def complete(self, text=None, line_buffer=None, cursor_pos=None) -> Tuple[str, Sequence[str]]:
2007 2007 """Find completions for the given text and line context.
2008 2008
2009 2009 Note that both the text and the line_buffer are optional, but at least
2010 2010 one of them must be given.
2011 2011
2012 2012 Parameters
2013 2013 ----------
2014 2014 text : string, optional
2015 2015 Text to perform the completion on. If not given, the line buffer
2016 2016 is split using the instance's CompletionSplitter object.
2017 2017 line_buffer : string, optional
2018 2018 If not given, the completer attempts to obtain the current line
2019 2019 buffer via readline. This keyword allows clients which are
2020 2020 requesting text completions in non-readline contexts to inform
2021 2021 the completer of the entire text.
2022 2022 cursor_pos : int, optional
2023 2023 Index of the cursor in the full line buffer. Should be provided by
2024 2024 remote frontends where kernel has no access to frontend state.
2025 2025
2026 2026 Returns
2027 2027 -------
2028 2028 Tuple of two items:
2029 2029 text : str
2030 2030 Text that was actually used in the completion.
2031 2031 matches : list
2032 2032 A list of completion matches.
2033 2033
2034 2034 Notes
2035 2035 -----
2036 2036 This API is likely to be deprecated and replaced by
2037 2037 :any:`IPCompleter.completions` in the future.
2038 2038
2039 2039 """
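# A hedged usage sketch of this (pending-deprecation) API; ``ip`` is a
# hypothetical IPython shell instance:
#
#     ip.Completer.complete(line_buffer='pri', cursor_pos=3)
#     # -> ('pri', ['print', ...])  (actual matches depend on the namespace)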
2040 2040 warnings.warn('`Completer.complete` is pending deprecation since '
2041 2041 'IPython 6.0 and will be replaced by `Completer.completions`.',
2042 2042 PendingDeprecationWarning)
2043 2043 # potential todo, FOLD the 3rd throw away argument of _complete
2044 2044 # into the first 2 one.
2045 2045 return self._complete(line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0)[:2]
2046 2046
2047 2047 def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
2048 2048 full_text=None) -> _CompleteResult:
2049 2049 """
2050 2050 Like complete but can also return raw jedi completions as well as the
2051 2051 origin of the completion text. This could (and should) be made much
2052 2052 cleaner but that will be simpler once we drop the old (and stateful)
2053 2053 :any:`complete` API.
2054 2054
2055 2055 With the current provisional API, cursor_pos acts (depending on the
2056 2056 caller) either as the offset in the ``text`` or ``line_buffer``, or as the
2057 2057 ``column`` when passing multiline strings; this could/should be renamed
2058 2058 but would add extra noise.
2059 2059
2060 2060 Returns
2061 2061 -------
2062 2062 A tuple of N elements which are (likely):
2063 2063 matched_text: ? the text that the complete matched
2064 2064 matches: list of completions ?
2065 2065 matches_origin: ? list of same length as matches, saying where each completion came from
2066 2066 jedi_matches: list of Jedi matches, which has its own structure.
2067 2067 """
2068 2068
2069 2069
2070 2070 # if the cursor position isn't given, the only sane assumption we can
2071 2071 # make is that it's at the end of the line (the common case)
2072 2072 if cursor_pos is None:
2073 2073 cursor_pos = len(line_buffer) if text is None else len(text)
2074 2074
2075 2075 if self.use_main_ns:
2076 2076 self.namespace = __main__.__dict__
2077 2077
2078 2078 # if text is either None or an empty string, rely on the line buffer
2079 2079 if (not line_buffer) and full_text:
2080 2080 line_buffer = full_text.split('\n')[cursor_line]
2081 2081 if not text: # issue #11508: check line_buffer before calling split_line
2082 2082 text = self.splitter.split_line(line_buffer, cursor_pos) if line_buffer else ''
2083 2083
2084 2084 if self.backslash_combining_completions:
2085 2085 # allow deactivation of these on windows.
2086 2086 base_text = text if not line_buffer else line_buffer[:cursor_pos]
2087 2087
2088 2088 for meth in (self.latex_matches,
2089 2089 self.unicode_name_matches,
2090 2090 back_latex_name_matches,
2091 2091 back_unicode_name_matches,
2092 2092 self.fwd_unicode_match):
2093 2093 name_text, name_matches = meth(base_text)
2094 2094 if name_text:
2095 2095 return _CompleteResult(name_text, name_matches[:MATCHES_LIMIT], \
2096 2096 [meth.__qualname__]*min(len(name_matches), MATCHES_LIMIT), ())
2097 2097
2098 2098
2099 2099 # If no line buffer is given, assume the input text is all there was
2100 2100 if line_buffer is None:
2101 2101 line_buffer = text
2102 2102
2103 2103 self.line_buffer = line_buffer
2104 2104 self.text_until_cursor = self.line_buffer[:cursor_pos]
2105 2105
2106 2106 # Do magic arg matches
2107 2107 for matcher in self.magic_arg_matchers:
2108 2108 matches = list(matcher(line_buffer))[:MATCHES_LIMIT]
2109 2109 if matches:
2110 2110 origins = [matcher.__qualname__] * len(matches)
2111 2111 return _CompleteResult(text, matches, origins, ())
2112 2112
2113 2113 # Start with a clean slate of completions
2114 2114 matches = []
2115 2115
2116 2116 # FIXME: we should extend our api to return a dict with completions for
2117 2117 # different types of objects. The rlcomplete() method could then
2118 2118 # simply collapse the dict into a list for readline, but we'd have
2119 2119 # richer completion semantics in other environments.
2120 2120 completions:Iterable[Any] = []
2121 2121 if self.use_jedi:
2122 2122 if not full_text:
2123 2123 full_text = line_buffer
2124 2124 completions = self._jedi_matches(
2125 2125 cursor_pos, cursor_line, full_text)
2126 2126
2127 2127 if self.merge_completions:
2128 2128 matches = []
2129 2129 for matcher in self.matchers:
2130 2130 try:
2131 2131 matches.extend([(m, matcher.__qualname__)
2132 2132 for m in matcher(text)])
2133 2133 except:
2134 2134 # Show the ugly traceback if the matcher causes an
2135 2135 # exception, but do NOT crash the kernel!
2136 2136 sys.excepthook(*sys.exc_info())
2137 2137 else:
2138 2138 for matcher in self.matchers:
2139 2139 matches = [(m, matcher.__qualname__)
2140 2140 for m in matcher(text)]
2141 2141 if matches:
2142 2142 break
2143 2143
2144 2144 seen = set()
2145 2145 filtered_matches = set()
2146 2146 for m in matches:
2147 2147 t, c = m
2148 2148 if t not in seen:
2149 2149 filtered_matches.add(m)
2150 2150 seen.add(t)
2151 2151
2152 2152 _filtered_matches = sorted(filtered_matches, key=lambda x: completions_sorting_key(x[0]))
2153 2153
2154 2154 custom_res = [(m, 'custom') for m in self.dispatch_custom_completer(text) or []]
2155 2155
2156 2156 _filtered_matches = custom_res or _filtered_matches
2157 2157
2158 2158 _filtered_matches = _filtered_matches[:MATCHES_LIMIT]
2159 2159 _matches = [m[0] for m in _filtered_matches]
2160 2160 origins = [m[1] for m in _filtered_matches]
2161 2161
2162 2162 self.matches = _matches
2163 2163
2164 2164 return _CompleteResult(text, _matches, origins, completions)
2165 2165
2166 2166 def fwd_unicode_match(self, text:str) -> Tuple[str, Sequence[str]]:
2167 2167 """
2168 2168 Forward match a string starting with a backslash with a list of
2169 2169 potential Unicode completions.
2170 2170
2171 2171 Will compute the list of Unicode character names on first call and cache it.
2172 2172
2173 2173 Returns
2174 2174 -------
2175 2175 A tuple with:
2176 2176 - matched text (empty if no matches)
2177 2177 - list of potential completions (empty tuple if no matches)
2178 2178 """
2179 2179 # TODO: self.unicode_names is here a list we traverse each time with ~100k elements.
2180 2180 # We could do a faster match using a Trie.
2181 2181
2182 2182 # Using pygtrie the following seems to work:
2183 2183
2184 2184 # s = PrefixSet()
2185 2185
2186 2186 # for c in range(0,0x10FFFF + 1):
2187 2187 # try:
2188 2188 # s.add(unicodedata.name(chr(c)))
2189 2189 # except ValueError:
2190 2190 # pass
2191 2191 # [''.join(k) for k in s.iter(prefix)]
2192 2192
2193 2193 # But need to be timed and adds an extra dependency.
2194 2194
2195 2195 slashpos = text.rfind('\\')
2196 2196 # if text contains a backslash
2197 2197 if slashpos > -1:
2198 2198 # PERF: It's important that we don't access self._unicode_names
2199 2199 # until we're inside this if-block. _unicode_names is lazily
2200 2200 # initialized, and it takes a user-noticeable amount of time to
2201 2201 # initialize it, so we don't want to initialize it unless we're
2202 2202 # actually going to use it.
2203 2203 s = text[slashpos+1:]
2204 2204 candidates = [x for x in self.unicode_names if x.startswith(s)]
2205 2205 if candidates:
2206 2206 return s, candidates
2207 2207 else:
2208 2208 return '', ()
2209 2209
2210 2210 # if text does not contain a backslash
2211 2211 else:
2212 2212 return '', ()
2213 2213
2214 2214 @property
2215 2215 def unicode_names(self) -> List[str]:
2216 2216 """List of names of unicode code points that can be completed.
2217 2217
2218 2218 The list is lazily initialized on first access.
2219 2219 """
2220 2220 if self._unicode_names is None:
2221 2221 names = []
2222 2222 for c in range(0,0x10FFFF + 1):
2223 2223 try:
2224 2224 names.append(unicodedata.name(chr(c)))
2225 2225 except ValueError:
2226 2226 pass
2227 2227 self._unicode_names = _unicode_name_compute(_UNICODE_RANGES)
2228 2228
2229 2229 return self._unicode_names
2230 2230
2231 2231 def _unicode_name_compute(ranges:List[Tuple[int,int]]) -> List[str]:
2232 2232 names = []
2233 2233 for start,stop in ranges:
2234 2234 for c in range(start, stop) :
2235 2235 try:
2236 2236 names.append(unicodedata.name(chr(c)))
2237 2237 except ValueError:
2238 2238 pass
2239 2239 return names
@@ -1,1106 +1,1108 b''
1 1 # -*- coding: utf-8 -*-
2 2 """
3 3 Pdb debugger class.
4 4
5 5
6 6 This is an extension to PDB which adds a number of new features.
7 7 Note that there is also the `IPython.terminal.debugger` class which provides UI
8 8 improvements.
9 9
10 10 We also strongly recommend using this via the `ipdb` package, which provides
11 11 extra configuration options.
12 12
13 13 Among other things, this subclass of PDB:
14 14 - supports many IPython magics like pdef/psource
15 15 - hides frames in tracebacks based on `__tracebackhide__`
16 16 - allows skipping frames based on `__debuggerskip__`
17 17
18 18 Frame skipping and hiding are configurable via the `skip_predicates`
19 19 command.
20 20
21 21 By default, frames containing ``__tracebackhide__=True`` will be hidden;
22 22 hiding frames from read-only files can be enabled via the `readonly` predicate.
23 23
24 24 Frames containing ``__debuggerskip__`` will be stepped over, and frames whose
25 25 parent frame's value of ``__debuggerskip__`` is ``True`` will be skipped.
26 26
27 27 >>> def helpers_helper():
28 28 ... pass
29 29 ...
30 30 ... def helper_1():
31 31 ... print("don't step in me")
32 32 ... helpers_helper() # will be stepped over unless a breakpoint is set.
33 33 ...
34 34 ...
35 35 ... def helper_2():
36 36 ... print("in me neither")
37 37 ...
38 38
39 39 One can define a decorator that wraps a function between the two helpers:
40 40
41 41 >>> def pdb_skipped_decorator(function):
42 42 ...
43 43 ...
44 44 ... def wrapped_fn(*args, **kwargs):
45 45 ... __debuggerskip__ = True
46 46 ... helper_1()
47 47 ... __debuggerskip__ = False
48 48 ... result = function(*args, **kwargs)
49 49 ... __debuggerskip__ = True
50 50 ... helper_2()
51 51 ... # setting __debuggerskip__ to False again is not necessary
52 52 ... return result
53 53 ...
54 54 ... return wrapped_fn
55 55
56 56 When decorating a function, ipdb will directly step into ``bar()`` by
57 57 default:
58 58
59 59 >>> @pdb_skipped_decorator
60 60 ... def bar(x, y):
61 61 ... return x * y
62 62
63 63
64 64 You can toggle the behavior with
65 65
66 66 ipdb> skip_predicates debuggerskip false
67 67
68 68 or configure it in your ``.pdbrc``
69 69
70 70
71 71
72 72 License
73 73 --------
74 74
75 75 Modified from the standard pdb.Pdb class to avoid including readline, so that
76 76 the command line completion of other programs which include this isn't
77 77 damaged.
78 78
79 79 In the future, this class will be expanded with improvements over the standard
80 80 pdb.
81 81
82 82 The original code in this file is mainly lifted out of cmd.py in Python 2.2,
83 83 with minor changes. Licensing should therefore be under the standard Python
84 84 terms. For details on the PSF (Python Software Foundation) standard license,
85 85 see:
86 86
87 87 https://docs.python.org/2/license.html
88 88
89 89
90 90 All the changes since then are under the same license as IPython.
91 91
92 92 """
93 93
94 94 #*****************************************************************************
95 95 #
96 96 # This file is licensed under the PSF license.
97 97 #
98 98 # Copyright (C) 2001 Python Software Foundation, www.python.org
99 99 # Copyright (C) 2005-2006 Fernando Perez. <fperez@colorado.edu>
100 100 #
101 101 #
102 102 #*****************************************************************************
103 103
104 104 import bdb
105 105 import functools
106 106 import inspect
107 107 import linecache
108 108 import sys
109 109 import warnings
110 110 import re
111 111 import os
112 112
113 113 from IPython import get_ipython
114 114 from IPython.utils import PyColorize
115 115 from IPython.utils import coloransi, py3compat
116 116 from IPython.core.excolors import exception_colors
117 117 from IPython.testing.skipdoctest import skip_doctest
118 118
119 # skip module doctests
120 __skip_doctest__ = True
119 121
120 122 prompt = 'ipdb> '
121 123
122 124 # We have to check this directly from sys.argv, config struct not yet available
123 125 from pdb import Pdb as OldPdb
124 126
125 127 # Allow the set_trace code to operate outside of an ipython instance, even if
126 128 # it does so with some limitations. The rest of this support is implemented in
127 129 # the Tracer constructor.
128 130
129 131 DEBUGGERSKIP = "__debuggerskip__"
130 132
131 133
132 134 def make_arrow(pad):
133 135 """generate the leading arrow in front of traceback or debugger"""
134 136 if pad >= 2:
135 137 return '-'*(pad-2) + '> '
136 138 elif pad == 1:
137 139 return '>'
138 140 return ''
139 141
140 142
141 143 def BdbQuit_excepthook(et, ev, tb, excepthook=None):
142 144 """Exception hook which handles `BdbQuit` exceptions.
143 145
144 146 All other exceptions are processed using the `excepthook`
145 147 parameter.
146 148 """
147 149 warnings.warn("`BdbQuit_excepthook` is deprecated since version 5.1",
148 150 DeprecationWarning, stacklevel=2)
149 151 if et == bdb.BdbQuit:
150 152 print('Exiting Debugger.')
151 153 elif excepthook is not None:
152 154 excepthook(et, ev, tb)
153 155 else:
154 156 # Backwards compatibility. Raise deprecation warning?
155 157 BdbQuit_excepthook.excepthook_ori(et, ev, tb)
156 158
157 159
158 160 def BdbQuit_IPython_excepthook(self, et, ev, tb, tb_offset=None):
159 161 warnings.warn(
160 162 "`BdbQuit_IPython_excepthook` is deprecated since version 5.1",
161 163 DeprecationWarning, stacklevel=2)
162 164 print('Exiting Debugger.')
163 165
164 166
165 167 class Tracer(object):
166 168 """
167 169 DEPRECATED
168 170
169 171 Class for local debugging, similar to pdb.set_trace.
170 172
171 173 Instances of this class, when called, behave like pdb.set_trace, but
172 174 providing IPython's enhanced capabilities.
173 175
174 176 This is implemented as a class which must be initialized in your own code
175 177 and not as a standalone function because we need to detect at runtime
176 178 whether IPython is already active or not. That detection is done in the
177 179 constructor, ensuring that this code plays nicely with a running IPython,
178 180 while functioning acceptably (though with limitations) if outside of it.
179 181 """
180 182
181 183 @skip_doctest
182 184 def __init__(self, colors=None):
183 185 """
184 186 DEPRECATED
185 187
186 188 Create a local debugger instance.
187 189
188 190 Parameters
189 191 ----------
190 192 colors : str, optional
191 193 The name of the color scheme to use, it must be one of IPython's
192 194 valid color schemes. If not given, the function will default to
193 195 the current IPython scheme when running inside IPython, and to
194 196 'NoColor' otherwise.
195 197
196 198 Examples
197 199 --------
198 200 ::
199 201
200 202 from IPython.core.debugger import Tracer; debug_here = Tracer()
201 203
202 204 Later in your code::
203 205
204 206 debug_here() # -> will open up the debugger at that point.
205 207
206 208 Once the debugger activates, you can use all of its regular commands to
207 209 step through code, set breakpoints, etc. See the pdb documentation
208 210 from the Python standard library for usage details.
209 211 """
210 212 warnings.warn("`Tracer` is deprecated since version 5.1, directly use "
211 213 "`IPython.core.debugger.Pdb.set_trace()`",
212 214 DeprecationWarning, stacklevel=2)
213 215
214 216 ip = get_ipython()
215 217 if ip is None:
216 218 # Outside of ipython, we set our own exception hook manually
217 219 sys.excepthook = functools.partial(BdbQuit_excepthook,
218 220 excepthook=sys.excepthook)
219 221 def_colors = 'NoColor'
220 222 else:
221 223 # In ipython, we use its custom exception handler mechanism
222 224 def_colors = ip.colors
223 225 ip.set_custom_exc((bdb.BdbQuit,), BdbQuit_IPython_excepthook)
224 226
225 227 if colors is None:
226 228 colors = def_colors
227 229
228 230 # The stdlib debugger internally uses a modified repr from the `repr`
229 231 # module, that limits the length of printed strings to a hardcoded
230 232 # limit of 30 characters. That much trimming is too aggressive, let's
231 233 # at least raise that limit to 80 chars, which should be enough for
232 234 # most interactive uses.
233 235 try:
234 236 from reprlib import aRepr
235 237 aRepr.maxstring = 80
236 238 except:
237 239 # This is only a user-facing convenience, so any error we encounter
238 240 # here can be warned about but can be otherwise ignored. These
239 241 # printouts will tell us about problems if this API changes
240 242 import traceback
241 243 traceback.print_exc()
242 244
243 245 self.debugger = Pdb(colors)
244 246
245 247 def __call__(self):
246 248 """Starts an interactive debugger at the point where called.
247 249
248 250 This is similar to the pdb.set_trace() function from the std lib, but
249 251 using IPython's enhanced debugger."""
250 252
251 253 self.debugger.set_trace(sys._getframe().f_back)
252 254
253 255
254 256 RGX_EXTRA_INDENT = re.compile(r'(?<=\n)\s+')
255 257
256 258
257 259 def strip_indentation(multiline_string):
258 260 return RGX_EXTRA_INDENT.sub('', multiline_string)
259 261
260 262
261 263 def decorate_fn_with_doc(new_fn, old_fn, additional_text=""):
262 264 """Make new_fn have old_fn's doc string. This is particularly useful
263 265 for the ``do_...`` commands that hook into the help system.
264 266 Adapted from a comp.lang.python posting
265 267 by Duncan Booth."""
266 268 def wrapper(*args, **kw):
267 269 return new_fn(*args, **kw)
268 270 if old_fn.__doc__:
269 271 wrapper.__doc__ = strip_indentation(old_fn.__doc__) + additional_text
270 272 return wrapper
271 273
272 274
273 275 class Pdb(OldPdb):
274 276 """Modified Pdb class, does not load readline.
275 277
276 278 for a standalone version that uses prompt_toolkit, see
277 279 `IPython.terminal.debugger.TerminalPdb` and
278 280 `IPython.terminal.debugger.set_trace()`
279 281
280 282
281 283 This debugger can hide and skip frames that are tagged according to some predicates.
282 284 See the `skip_predicates` commands.
283 285
284 286 """
285 287
286 288 default_predicates = {
287 289 "tbhide": True,
288 290 "readonly": False,
289 291 "ipython_internal": True,
290 292 "debuggerskip": True,
291 293 }
292 294
293 295 def __init__(self, color_scheme=None, completekey=None,
294 296 stdin=None, stdout=None, context=5, **kwargs):
295 297 """Create a new IPython debugger.
296 298
297 299 Parameters
298 300 ----------
299 301 color_scheme : default None
300 302 Deprecated, do not use.
301 303 completekey : default None
302 304 Passed to pdb.Pdb.
303 305 stdin : default None
304 306 Passed to pdb.Pdb.
305 307 stdout : default None
306 308 Passed to pdb.Pdb.
307 309 context : int
308 310 Number of lines of source code context to show when
309 311 displaying stacktrace information.
310 312 **kwargs
311 313 Passed to pdb.Pdb.
312 314
313 315 Notes
314 316 -----
315 317 The possibilities are python version dependent, see the python
316 318 docs for more info.
317 319 """
318 320
319 321 # Parent constructor:
320 322 try:
321 323 self.context = int(context)
322 324 if self.context <= 0:
323 325 raise ValueError("Context must be a positive integer")
324 326 except (TypeError, ValueError) as e:
325 327 raise ValueError("Context must be a positive integer") from e
326 328
327 329 # `kwargs` ensures full compatibility with stdlib's `pdb.Pdb`.
328 330 OldPdb.__init__(self, completekey, stdin, stdout, **kwargs)
329 331
330 332 # IPython changes...
331 333 self.shell = get_ipython()
332 334
333 335 if self.shell is None:
334 336 save_main = sys.modules['__main__']
335 337 # No IPython instance running, we must create one
336 338 from IPython.terminal.interactiveshell import \
337 339 TerminalInteractiveShell
338 340 self.shell = TerminalInteractiveShell.instance()
339 341 # needed by any code which calls __import__("__main__") after
340 342 # the debugger was entered. See also #9941.
341 343 sys.modules["__main__"] = save_main
342 344
343 345 if color_scheme is not None:
344 346 warnings.warn(
345 347 "The `color_scheme` argument is deprecated since version 5.1",
346 348 DeprecationWarning, stacklevel=2)
347 349 else:
348 350 color_scheme = self.shell.colors
349 351
350 352 self.aliases = {}
351 353
352 354 # Create color table: we copy the default one from the traceback
353 355 # module and add a few attributes needed for debugging
354 356 self.color_scheme_table = exception_colors()
355 357
356 358 # shorthands
357 359 C = coloransi.TermColors
358 360 cst = self.color_scheme_table
359 361
360 362 cst['NoColor'].colors.prompt = C.NoColor
361 363 cst['NoColor'].colors.breakpoint_enabled = C.NoColor
362 364 cst['NoColor'].colors.breakpoint_disabled = C.NoColor
363 365
364 366 cst['Linux'].colors.prompt = C.Green
365 367 cst['Linux'].colors.breakpoint_enabled = C.LightRed
366 368 cst['Linux'].colors.breakpoint_disabled = C.Red
367 369
368 370 cst['LightBG'].colors.prompt = C.Blue
369 371 cst['LightBG'].colors.breakpoint_enabled = C.LightRed
370 372 cst['LightBG'].colors.breakpoint_disabled = C.Red
371 373
372 374 cst['Neutral'].colors.prompt = C.Blue
373 375 cst['Neutral'].colors.breakpoint_enabled = C.LightRed
374 376 cst['Neutral'].colors.breakpoint_disabled = C.Red
375 377
376 378 # Add a python parser so we can syntax highlight source while
377 379 # debugging.
378 380 self.parser = PyColorize.Parser(style=color_scheme)
379 381 self.set_colors(color_scheme)
380 382
381 383 # Set the prompt - the default prompt is '(Pdb)'
382 384 self.prompt = prompt
383 385 self.skip_hidden = True
384 386 self.report_skipped = True
385 387
386 388 # list of predicates we use to skip frames
387 389 self._predicates = self.default_predicates
388 390
389 391 #
390 392 def set_colors(self, scheme):
391 393 """Shorthand access to the color table scheme selector method."""
392 394 self.color_scheme_table.set_active_scheme(scheme)
393 395 self.parser.style = scheme
394 396
395 397 def set_trace(self, frame=None):
396 398 if frame is None:
397 399 frame = sys._getframe().f_back
398 400 self.initial_frame = frame
399 401 return super().set_trace(frame)
400 402
401 403 def _hidden_predicate(self, frame):
402 404 """
403 405 Given a frame, return whether it should be hidden or not by IPython.
404 406 """
405 407
406 408 if self._predicates["readonly"]:
407 409 fname = frame.f_code.co_filename
408 410 # we need to check for file existence; interactively defined
409 411 # functions would otherwise appear as read-only.
410 412 if os.path.isfile(fname) and not os.access(fname, os.W_OK):
411 413 return True
412 414
413 415 if self._predicates["tbhide"]:
414 416 if frame in (self.curframe, getattr(self, "initial_frame", None)):
415 417 return False
416 418 else:
417 419 return self._get_frame_locals(frame).get("__tracebackhide__", False)
418 420
419 421 return False
420 422
421 423 def hidden_frames(self, stack):
422 424 """
423 425 Given a stack, return for each frame whether it should be skipped.
424 426
425 427 This is used in up/down and where to skip frames.
426 428 """
427 429 # The f_locals dictionary is updated from the actual frame
428 430 # locals whenever the .f_locals accessor is called, so we
429 431 # avoid calling it here to preserve self.curframe_locals.
430 432 # Furthermore, there is no good reason to hide the current frame.
431 433 ip_hide = [self._hidden_predicate(s[0]) for s in stack]
432 434 ip_start = [i for i, s in enumerate(ip_hide) if s == "__ipython_bottom__"]
433 435 if ip_start and self._predicates["ipython_internal"]:
434 436 ip_hide = [h if i > ip_start[0] else True for (i, h) in enumerate(ip_hide)]
435 437 return ip_hide
436 438
437 439 def interaction(self, frame, traceback):
438 440 try:
439 441 OldPdb.interaction(self, frame, traceback)
440 442 except KeyboardInterrupt:
441 443 self.stdout.write("\n" + self.shell.get_exception_only())
442 444
443 445 def precmd(self, line):
444 446 """Perform useful escapes on the command before it is executed."""
445 447
446 448 if line.endswith("??"):
447 449 line = "pinfo2 " + line[:-2]
448 450 elif line.endswith("?"):
449 451 line = "pinfo " + line[:-1]
450 452
451 453 line = super().precmd(line)
452 454
453 455 return line
454 456
455 457 def new_do_frame(self, arg):
456 458 OldPdb.do_frame(self, arg)
457 459
458 460 def new_do_quit(self, arg):
459 461
460 462 if hasattr(self, 'old_all_completions'):
461 463 self.shell.Completer.all_completions = self.old_all_completions
462 464
463 465 return OldPdb.do_quit(self, arg)
464 466
465 467 do_q = do_quit = decorate_fn_with_doc(new_do_quit, OldPdb.do_quit)
466 468
467 469 def new_do_restart(self, arg):
468 470 """Restart command. In the context of ipython this is exactly the same
469 471 thing as 'quit'."""
470 472 self.msg("Restart doesn't make sense here. Using 'quit' instead.")
471 473 return self.do_quit(arg)
472 474
473 475 def print_stack_trace(self, context=None):
474 476 Colors = self.color_scheme_table.active_colors
475 477 ColorsNormal = Colors.Normal
476 478 if context is None:
477 479 context = self.context
478 480 try:
479 481 context = int(context)
480 482 if context <= 0:
481 483 raise ValueError("Context must be a positive integer")
482 484 except (TypeError, ValueError) as e:
483 485 raise ValueError("Context must be a positive integer") from e
484 486 try:
485 487 skipped = 0
486 488 for hidden, frame_lineno in zip(self.hidden_frames(self.stack), self.stack):
487 489 if hidden and self.skip_hidden:
488 490 skipped += 1
489 491 continue
490 492 if skipped:
491 493 print(
492 494 f"{Colors.excName} [... skipping {skipped} hidden frame(s)]{ColorsNormal}\n"
493 495 )
494 496 skipped = 0
495 497 self.print_stack_entry(frame_lineno, context=context)
496 498 if skipped:
497 499 print(
498 500 f"{Colors.excName} [... skipping {skipped} hidden frame(s)]{ColorsNormal}\n"
499 501 )
500 502 except KeyboardInterrupt:
501 503 pass
502 504
503 505 def print_stack_entry(self, frame_lineno, prompt_prefix='\n-> ',
504 506 context=None):
505 507 if context is None:
506 508 context = self.context
507 509 try:
508 510 context = int(context)
509 511 if context <= 0:
510 512 raise ValueError("Context must be a positive integer")
511 513 except (TypeError, ValueError) as e:
512 514 raise ValueError("Context must be a positive integer") from e
513 515 print(self.format_stack_entry(frame_lineno, '', context), file=self.stdout)
514 516
515 517 # vds: >>
516 518 frame, lineno = frame_lineno
517 519 filename = frame.f_code.co_filename
518 520 self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
519 521 # vds: <<
520 522
521 523 def _get_frame_locals(self, frame):
522 524 """
523 525 Accessing f_locals of the current frame resets the namespace, so we want to avoid
524 526 that or the following can happen
525 527
526 528 ipdb> foo
527 529 "old"
528 530 ipdb> foo = "new"
529 531 ipdb> foo
530 532 "new"
531 533 ipdb> where
532 534 ipdb> foo
533 535 "old"
534 536
535 537 So if frame is self.curframe we instead return self.curframe_locals
536 538
537 539 """
538 540 if frame is self.curframe:
539 541 return self.curframe_locals
540 542 else:
541 543 return frame.f_locals
542 544
543 545 def format_stack_entry(self, frame_lineno, lprefix=': ', context=None):
544 546 if context is None:
545 547 context = self.context
546 548 try:
547 549 context = int(context)
548 550 if context <= 0:
549 551 print("Context must be a positive integer", file=self.stdout)
550 552 except (TypeError, ValueError):
551 553 print("Context must be a positive integer", file=self.stdout)
552 554
553 555 import reprlib
554 556
555 557 ret = []
556 558
557 559 Colors = self.color_scheme_table.active_colors
558 560 ColorsNormal = Colors.Normal
559 561 tpl_link = "%s%%s%s" % (Colors.filenameEm, ColorsNormal)
560 562 tpl_call = "%s%%s%s%%s%s" % (Colors.vName, Colors.valEm, ColorsNormal)
561 563 tpl_line = "%%s%s%%s %s%%s" % (Colors.lineno, ColorsNormal)
562 564 tpl_line_em = "%%s%s%%s %s%%s%s" % (Colors.linenoEm, Colors.line, ColorsNormal)
563 565
564 566 frame, lineno = frame_lineno
565 567
566 568 return_value = ''
567 569 loc_frame = self._get_frame_locals(frame)
568 570 if "__return__" in loc_frame:
569 571 rv = loc_frame["__return__"]
570 572 # return_value += '->'
571 573 return_value += reprlib.repr(rv) + "\n"
572 574 ret.append(return_value)
573 575
574 576 #s = filename + '(' + `lineno` + ')'
575 577 filename = self.canonic(frame.f_code.co_filename)
576 578 link = tpl_link % py3compat.cast_unicode(filename)
577 579
578 580 if frame.f_code.co_name:
579 581 func = frame.f_code.co_name
580 582 else:
581 583 func = "<lambda>"
582 584
583 585 call = ""
584 586 if func != "?":
585 587 if "__args__" in loc_frame:
586 588 args = reprlib.repr(loc_frame["__args__"])
587 589 else:
588 590 args = '()'
589 591 call = tpl_call % (func, args)
590 592
591 593 # The level info should be generated in the same format pdb uses, to
592 594 # avoid breaking the pdbtrack functionality of python-mode in *emacs.
593 595 if frame is self.curframe:
594 596 ret.append('> ')
595 597 else:
596 598 ret.append(" ")
597 599 ret.append("%s(%s)%s\n" % (link, lineno, call))
598 600
599 601 start = lineno - 1 - context//2
600 602 lines = linecache.getlines(filename)
601 603 start = min(start, len(lines) - context)
602 604 start = max(start, 0)
603 605 lines = lines[start : start + context]
604 606
605 607 for i, line in enumerate(lines):
606 608 show_arrow = start + 1 + i == lineno
607 609 linetpl = (frame is self.curframe or show_arrow) and tpl_line_em or tpl_line
608 610 ret.append(
609 611 self.__format_line(
610 612 linetpl, filename, start + 1 + i, line, arrow=show_arrow
611 613 )
612 614 )
613 615 return "".join(ret)
614 616
615 617 def __format_line(self, tpl_line, filename, lineno, line, arrow=False):
616 618 bp_mark = ""
617 619 bp_mark_color = ""
618 620
619 621 new_line, err = self.parser.format2(line, 'str')
620 622 if not err:
621 623 line = new_line
622 624
623 625 bp = None
624 626 if lineno in self.get_file_breaks(filename):
625 627 bps = self.get_breaks(filename, lineno)
626 628 bp = bps[-1]
627 629
628 630 if bp:
629 631 Colors = self.color_scheme_table.active_colors
630 632 bp_mark = str(bp.number)
631 633 bp_mark_color = Colors.breakpoint_enabled
632 634 if not bp.enabled:
633 635 bp_mark_color = Colors.breakpoint_disabled
634 636
635 637 numbers_width = 7
636 638 if arrow:
637 639 # This is the line with the error
638 640 pad = numbers_width - len(str(lineno)) - len(bp_mark)
639 641 num = '%s%s' % (make_arrow(pad), str(lineno))
640 642 else:
641 643 num = '%*s' % (numbers_width - len(bp_mark), str(lineno))
642 644
643 645 return tpl_line % (bp_mark_color + bp_mark, num, line)
644 646
645 647 def print_list_lines(self, filename, first, last):
646 648 """The printing (as opposed to the parsing) part of a 'list'
647 649 command."""
648 650 try:
649 651 Colors = self.color_scheme_table.active_colors
650 652 ColorsNormal = Colors.Normal
651 653 tpl_line = '%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
652 654 tpl_line_em = '%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line, ColorsNormal)
653 655 src = []
654 656 if filename == "<string>" and hasattr(self, "_exec_filename"):
655 657 filename = self._exec_filename
656 658
657 659 for lineno in range(first, last+1):
658 660 line = linecache.getline(filename, lineno)
659 661 if not line:
660 662 break
661 663
662 664 if lineno == self.curframe.f_lineno:
663 665 line = self.__format_line(
664 666 tpl_line_em, filename, lineno, line, arrow=True
665 667 )
666 668 else:
667 669 line = self.__format_line(
668 670 tpl_line, filename, lineno, line, arrow=False
669 671 )
670 672
671 673 src.append(line)
672 674 self.lineno = lineno
673 675
674 676 print(''.join(src), file=self.stdout)
675 677
676 678 except KeyboardInterrupt:
677 679 pass
678 680
679 681 def do_skip_predicates(self, args):
680 682 """
681 683 Turn on/off individual predicates as to whether a frame should be hidden/skipped.
682 684
683 685 The global option to skip (or not) hidden frames is set with skip_hidden.
684 686 
685 687 To change the value of a predicate:
686 688
687 689 skip_predicates key [true|false]
688 690
689 691 Call without arguments to see the current values.
690 692
691 693 To permanently change the value of an option add the corresponding
692 694 command to your ``~/.pdbrc`` file. If you are programmatically using the
693 695 Pdb instance you can also change the ``default_predicates`` class
694 696 attribute.
695 697 """
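# A hypothetical example session; default values are the ones from
# default_predicates above:
#
#     ipdb> skip_predicates
#     current predicates:
#          tbhide : True
#          readonly : False
#          ipython_internal : True
#          debuggerskip : True
#     ipdb> skip_predicates readonly true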
696 698 if not args.strip():
697 699 print("current predicates:")
698 700 for (p, v) in self._predicates.items():
699 701 print(" ", p, ":", v)
700 702 return
701 703 type_value = args.strip().split(" ")
702 704 if len(type_value) != 2:
703 705 print(
704 706 f"Usage: skip_predicates <type> <value>, with <type> one of {set(self._predicates.keys())}"
705 707 )
706 708 return
707 709
708 710 type_, value = type_value
709 711 if type_ not in self._predicates:
710 712 print(f"{type_!r} not in {set(self._predicates.keys())}")
711 713 return
712 714 if value.lower() not in ("true", "yes", "1", "no", "false", "0"):
713 715 print(
714 716 f"{value!r} is invalid - use one of ('true', 'yes', '1', 'no', 'false', '0')"
715 717 )
716 718 return
717 719
718 720 self._predicates[type_] = value.lower() in ("true", "yes", "1")
719 721 if not any(self._predicates.values()):
720 722 print(
721 723 "Warning, all predicates set to False, skip_hidden may not have any effects."
722 724 )
723 725
724 726 def do_skip_hidden(self, arg):
725 727 """
726 728 Change whether or not we should skip frames with the
727 729 __tracebackhide__ attribute.
728 730 """
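# A hypothetical example session:
#
#     ipdb> skip_hidden false
#     ipdb> skip_hidden
#     skip_hidden = False, use 'yes','no', 'true', or 'false' to change.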
729 731 if not arg.strip():
730 732 print(
731 733 f"skip_hidden = {self.skip_hidden}, use 'yes','no', 'true', or 'false' to change."
732 734 )
733 735 elif arg.strip().lower() in ("true", "yes"):
734 736 self.skip_hidden = True
735 737 elif arg.strip().lower() in ("false", "no"):
736 738 self.skip_hidden = False
737 739 if not any(self._predicates.values()):
738 740 print(
739 741 "Warning, all predicates set to False, skip_hidden may not have any effects."
740 742 )
741 743
742 744 def do_list(self, arg):
743 745 """Print lines of code from the current stack frame
744 746 """
745 747 self.lastcmd = 'list'
746 748 last = None
747 749 if arg:
748 750 try:
749 751 x = eval(arg, {}, {})
750 752 if type(x) == type(()):
751 753 first, last = x
752 754 first = int(first)
753 755 last = int(last)
754 756 if last < first:
755 757 # Assume it's a count
756 758 last = first + last
757 759 else:
758 760 first = max(1, int(x) - 5)
759 761 except:
760 762 print('*** Error in argument:', repr(arg), file=self.stdout)
761 763 return
762 764 elif self.lineno is None:
763 765 first = max(1, self.curframe.f_lineno - 5)
764 766 else:
765 767 first = self.lineno + 1
766 768 if last is None:
767 769 last = first + 10
768 770 self.print_list_lines(self.curframe.f_code.co_filename, first, last)
769 771
770 772 # vds: >>
771 773 lineno = first
772 774 filename = self.curframe.f_code.co_filename
773 775 self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
774 776 # vds: <<
775 777
776 778 do_l = do_list
777 779
778 780 def getsourcelines(self, obj):
779 781 lines, lineno = inspect.findsource(obj)
780 782 if inspect.isframe(obj) and obj.f_globals is self._get_frame_locals(obj):
781 783 # must be a module frame: do not try to cut a block out of it
782 784 return lines, 1
783 785 elif inspect.ismodule(obj):
784 786 return lines, 1
785 787 return inspect.getblock(lines[lineno:]), lineno+1
786 788
787 789 def do_longlist(self, arg):
788 790 """Print lines of code from the current stack frame.
789 791
790 792 Shows more lines than 'list' does.
791 793 """
792 794 self.lastcmd = 'longlist'
793 795 try:
794 796 lines, lineno = self.getsourcelines(self.curframe)
795 797 except OSError as err:
796 798 self.error(err)
797 799 return
798 800 last = lineno + len(lines)
799 801 self.print_list_lines(self.curframe.f_code.co_filename, lineno, last)
800 802 do_ll = do_longlist
801 803
802 804 def do_debug(self, arg):
803 805 """debug code
804 806 Enter a recursive debugger that steps through the code
805 807 argument (which is an arbitrary expression or statement to be
806 808 executed in the current environment).
807 809 """
808 810 trace_function = sys.gettrace()
809 811 sys.settrace(None)
810 812 globals = self.curframe.f_globals
811 813 locals = self.curframe_locals
812 814 p = self.__class__(completekey=self.completekey,
813 815 stdin=self.stdin, stdout=self.stdout)
814 816 p.use_rawinput = self.use_rawinput
815 817 p.prompt = "(%s) " % self.prompt.strip()
816 818 self.message("ENTERING RECURSIVE DEBUGGER")
817 819 sys.call_tracing(p.run, (arg, globals, locals))
818 820 self.message("LEAVING RECURSIVE DEBUGGER")
819 821 sys.settrace(trace_function)
820 822 self.lastcmd = p.lastcmd
821 823
822 824 def do_pdef(self, arg):
823 825 """Print the call signature for any callable object.
824 826
825 827 The debugger interface to %pdef"""
826 828 namespaces = [
827 829 ("Locals", self.curframe_locals),
828 830 ("Globals", self.curframe.f_globals),
829 831 ]
830 832 self.shell.find_line_magic("pdef")(arg, namespaces=namespaces)
831 833
832 834 def do_pdoc(self, arg):
833 835 """Print the docstring for an object.
834 836
835 837 The debugger interface to %pdoc."""
836 838 namespaces = [
837 839 ("Locals", self.curframe_locals),
838 840 ("Globals", self.curframe.f_globals),
839 841 ]
840 842 self.shell.find_line_magic("pdoc")(arg, namespaces=namespaces)
841 843
842 844 def do_pfile(self, arg):
843 845 """Print (or run through pager) the file where an object is defined.
844 846
845 847 The debugger interface to %pfile.
846 848 """
847 849 namespaces = [
848 850 ("Locals", self.curframe_locals),
849 851 ("Globals", self.curframe.f_globals),
850 852 ]
851 853 self.shell.find_line_magic("pfile")(arg, namespaces=namespaces)
852 854
853 855 def do_pinfo(self, arg):
854 856 """Provide detailed information about an object.
855 857
856 858 The debugger interface to %pinfo, i.e., obj?."""
857 859 namespaces = [
858 860 ("Locals", self.curframe_locals),
859 861 ("Globals", self.curframe.f_globals),
860 862 ]
861 863 self.shell.find_line_magic("pinfo")(arg, namespaces=namespaces)
862 864
863 865 def do_pinfo2(self, arg):
864 866 """Provide extra detailed information about an object.
865 867
866 868 The debugger interface to %pinfo2, i.e., obj??."""
867 869 namespaces = [
868 870 ("Locals", self.curframe_locals),
869 871 ("Globals", self.curframe.f_globals),
870 872 ]
871 873 self.shell.find_line_magic("pinfo2")(arg, namespaces=namespaces)
872 874
873 875 def do_psource(self, arg):
874 876 """Print (or run through pager) the source code for an object."""
875 877 namespaces = [
876 878 ("Locals", self.curframe_locals),
877 879 ("Globals", self.curframe.f_globals),
878 880 ]
879 881 self.shell.find_line_magic("psource")(arg, namespaces=namespaces)
880 882
881 883 def do_where(self, arg):
882 884 """w(here)
883 885 Print a stack trace, with the most recent frame at the bottom.
884 886 An arrow indicates the "current frame", which determines the
885 887 context of most commands. 'bt' is an alias for this command.
886 888
887 889 Takes an optional numeric argument giving the number of context
888 890 lines to print.
889 891 if arg:
890 892 try:
891 893 context = int(arg)
892 894 except ValueError as err:
893 895 self.error(err)
894 896 return
895 897 self.print_stack_trace(context)
896 898 else:
897 899 self.print_stack_trace()
898 900
899 901 do_w = do_where
900 902
901 903 def break_anywhere(self, frame):
902 904 """
903 905
904 906 _stop_in_decorator_internals is overly restrictive, as we may still want
905 907 to trace function calls, so we also need to update break_anywhere so
906 908 that if we don't `stop_here` because of debugger skip, we may still
907 909 stop at any point inside the function.
908 910
909 911 """
910 912
911 913 sup = super().break_anywhere(frame)
912 914 if sup:
913 915 return sup
914 916 if self._predicates["debuggerskip"]:
915 917 if DEBUGGERSKIP in frame.f_code.co_varnames:
916 918 return True
917 919 if frame.f_back and self._get_frame_locals(frame.f_back).get(DEBUGGERSKIP):
918 920 return True
919 921 return False
920 922
921 923 @skip_doctest
922 924 def _is_in_decorator_internal_and_should_skip(self, frame):
923 925 """
924 926 Utility to tell us whether we are inside a decorator's internals and should skip.
925 927
926 928
927 929
928 930 """
929 931
930 932 # if we are disabled don't skip
931 933 if not self._predicates["debuggerskip"]:
932 934 return False
933 935
934 936 # if frame is tagged, skip by default.
935 937 if DEBUGGERSKIP in frame.f_code.co_varnames:
936 938 return True
937 939
938 940 # if one of the parent frames has the value set to True, skip as well.
939 941
940 942 cframe = frame
941 943 while getattr(cframe, "f_back", None):
942 944 cframe = cframe.f_back
943 945 if self._get_frame_locals(cframe).get(DEBUGGERSKIP):
944 946 return True
945 947
946 948 return False
947 949
948 950 def stop_here(self, frame):
949 951
950 952 if self._is_in_decorator_internal_and_should_skip(frame) is True:
951 953 return False
952 954
953 955 hidden = False
954 956 if self.skip_hidden:
955 957 hidden = self._hidden_predicate(frame)
956 958 if hidden:
957 959 if self.report_skipped:
958 960 Colors = self.color_scheme_table.active_colors
959 961 ColorsNormal = Colors.Normal
960 962 print(
961 963 f"{Colors.excName} [... skipped 1 hidden frame]{ColorsNormal}\n"
962 964 )
963 965 return super().stop_here(frame)
964 966
965 967 def do_up(self, arg):
966 968 """u(p) [count]
967 969 Move the current frame count (default one) levels up in the
968 970 stack trace (to an older frame).
969 971
970 972 Will skip hidden frames.
971 973 """
972 974 # modified version of upstream that skips
973 975 # frames with __tracebackhide__
974 976 if self.curindex == 0:
975 977 self.error("Oldest frame")
976 978 return
977 979 try:
978 980 count = int(arg or 1)
979 981 except ValueError:
980 982 self.error("Invalid frame count (%s)" % arg)
981 983 return
982 984 skipped = 0
983 985 if count < 0:
984 986 _newframe = 0
985 987 else:
986 988 counter = 0
987 989 hidden_frames = self.hidden_frames(self.stack)
988 990 for i in range(self.curindex - 1, -1, -1):
989 991 if hidden_frames[i] and self.skip_hidden:
990 992 skipped += 1
991 993 continue
992 994 counter += 1
993 995 if counter >= count:
994 996 break
995 997 else:
996 998 # if no break occurred.
997 999 self.error(
998 1000 "all frames above hidden, use `skip_hidden False` to get into those."
999 1001 )
1000 1002 return
1001 1003
1002 1004 Colors = self.color_scheme_table.active_colors
1003 1005 ColorsNormal = Colors.Normal
1004 1006 _newframe = i
1005 1007 self._select_frame(_newframe)
1006 1008 if skipped:
1007 1009 print(
1008 1010 f"{Colors.excName} [... skipped {skipped} hidden frame(s)]{ColorsNormal}\n"
1009 1011 )
1010 1012
1011 1013 def do_down(self, arg):
1012 1014 """d(own) [count]
1013 1015 Move the current frame count (default one) levels down in the
1014 1016 stack trace (to a newer frame).
1015 1017
1016 1018 Will skip hidden frames.
1017 1019 """
1018 1020 if self.curindex + 1 == len(self.stack):
1019 1021 self.error("Newest frame")
1020 1022 return
1021 1023 try:
1022 1024 count = int(arg or 1)
1023 1025 except ValueError:
1024 1026 self.error("Invalid frame count (%s)" % arg)
1025 1027 return
1026 1028 if count < 0:
1027 1029 _newframe = len(self.stack) - 1
1028 1030 else:
1029 1031 counter = 0
1030 1032 skipped = 0
1031 1033 hidden_frames = self.hidden_frames(self.stack)
1032 1034 for i in range(self.curindex + 1, len(self.stack)):
1033 1035 if hidden_frames[i] and self.skip_hidden:
1034 1036 skipped += 1
1035 1037 continue
1036 1038 counter += 1
1037 1039 if counter >= count:
1038 1040 break
1039 1041 else:
1040 1042 self.error(
1041 1043 "all frames below hidden, use `skip_hidden False` to get into those."
1042 1044 )
1043 1045 return
1044 1046
1045 1047 Colors = self.color_scheme_table.active_colors
1046 1048 ColorsNormal = Colors.Normal
1047 1049 if skipped:
1048 1050 print(
1049 1051 f"{Colors.excName} [... skipped {skipped} hidden frame(s)]{ColorsNormal}\n"
1050 1052 )
1051 1053 _newframe = i
1052 1054
1053 1055 self._select_frame(_newframe)
1054 1056
1055 1057 do_d = do_down
1056 1058 do_u = do_up
1057 1059
1058 1060 def do_context(self, context):
1059 1061 """context number_of_lines
1060 1062 Set the number of lines of source code to show when displaying
1061 1063 stacktrace information.
1062 1064 """
1063 1065 try:
1064 1066 new_context = int(context)
1065 1067 if new_context <= 0:
1066 1068 raise ValueError()
1067 1069 self.context = new_context
1068 1070 except ValueError:
1069 1071 self.error("The 'context' command requires a positive integer argument.")
1070 1072
1071 1073
1072 1074 class InterruptiblePdb(Pdb):
1073 1075 """Version of debugger where KeyboardInterrupt exits the debugger altogether."""
1074 1076
1075 1077 def cmdloop(self, intro=None):
1076 1078 """Wrap cmdloop() such that KeyboardInterrupt stops the debugger."""
1077 1079 try:
1078 1080 return OldPdb.cmdloop(self, intro=intro)
1079 1081 except KeyboardInterrupt:
1080 1082 self.stop_here = lambda frame: False
1081 1083 self.do_quit("")
1082 1084 sys.settrace(None)
1083 1085 self.quitting = False
1084 1086 raise
1085 1087
1086 1088 def _cmdloop(self):
1087 1089 while True:
1088 1090 try:
1089 1091 # keyboard interrupts allow for an easy way to cancel
1090 1092 # the current command, so allow them during interactive input
1091 1093 self.allow_kbdint = True
1092 1094 self.cmdloop()
1093 1095 self.allow_kbdint = False
1094 1096 break
1095 1097 except KeyboardInterrupt:
1096 1098 self.message('--KeyboardInterrupt--')
1097 1099 raise
1098 1100
1099 1101
1100 1102 def set_trace(frame=None):
1101 1103 """
1102 1104 Start debugging from `frame`.
1103 1105
1104 1106 If frame is not specified, debugging starts from caller's frame.
1105 1107 """
1106 1108 Pdb().set_trace(frame or sys._getframe().f_back)
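As a minimal usage sketch (the function and argument names below are illustrative, not part of the module above), ``set_trace`` can be dropped into any code path to open this debugger at that point, with `u`/`d` then honouring the hidden-frame skipping shown above:

.. code:: python

    from IPython.core.debugger import set_trace

    def average(values):
        total = 0
        for v in values:
            set_trace()  # execution pauses here in the IPython debugger
            total += v
        return total / len(values)

    average([1, 2, 3])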
@@ -1,624 +1,624 b''
1 1 """IPython extension to reload modules before executing user code.
2 2
3 3 ``autoreload`` reloads modules automatically before entering the execution of
4 4 code typed at the IPython prompt.
5 5
6 6 This makes for example the following workflow possible:
7 7
8 8 .. sourcecode:: ipython
9 9
10 10 In [1]: %load_ext autoreload
11 11
12 12 In [2]: %autoreload 2
13 13
14 14 In [3]: from foo import some_function
15 15
16 16 In [4]: some_function()
17 17 Out[4]: 42
18 18
19 19 In [5]: # open foo.py in an editor and change some_function to return 43
20 20
21 21 In [6]: some_function()
22 22 Out[6]: 43
23 23
24 24 The module was reloaded without reloading it explicitly, and the object
25 25 imported with ``from foo import ...`` was also updated.
26 26
27 27 Usage
28 28 =====
29 29
30 30 The following magic commands are provided:
31 31
32 32 ``%autoreload``
33 33
34 34 Reload all modules (except those excluded by ``%aimport``)
35 35 automatically now.
36 36
37 37 ``%autoreload 0``
38 38
39 39 Disable automatic reloading.
40 40
41 41 ``%autoreload 1``
42 42
43 43 Reload all modules imported with ``%aimport`` every time before
44 44 executing the Python code typed.
45 45
46 46 ``%autoreload 2``
47 47
48 48 Reload all modules (except those excluded by ``%aimport``) every
49 49 time before executing the Python code typed.
50 50
51 51 ``%autoreload 3``
52 52
53 53 Reload all modules AND autoload newly added objects
54 54 every time before executing the Python code typed.
55 55
56 56 ``%aimport``
57 57
58 58 List modules which are to be automatically imported or not to be imported.
59 59
60 60 ``%aimport foo``
61 61
62 62 Import module 'foo' and mark it to be autoreloaded for ``%autoreload 1``
63 63
64 64 ``%aimport foo, bar``
65 65
66 66 Import modules 'foo', 'bar' and mark them to be autoreloaded for ``%autoreload 1``
67 67
68 68 ``%aimport -foo``
69 69
70 70 Mark module 'foo' to not be autoreloaded.
71 71
72 72 Caveats
73 73 =======
74 74
75 75 Reloading Python modules in a reliable way is in general difficult,
76 76 and unexpected things may occur. ``%autoreload`` tries to work around
77 77 common pitfalls by replacing function code objects and parts of
78 78 classes previously in the module with new versions. This makes the
79 79 following things work:
80 80
81 81 - Functions and classes imported via 'from xxx import foo' are upgraded
82 82 to new versions when 'xxx' is reloaded.
83 83
84 84 - Methods and properties of classes are upgraded on reload, so that
85 85 calling 'c.foo()' on an object 'c' created before the reload causes
86 86 the new code for 'foo' to be executed.
87 87
88 88 Some of the known remaining caveats are:
89 89
90 90 - Replacing code objects does not always succeed: changing a @property
91 91 in a class to an ordinary method or a method to a member variable
92 92 can cause problems (but in old objects only).
93 93
94 94 - Functions that are removed (e.g. via monkey-patching) from a module
95 95 before it is reloaded are not upgraded.
96 96
97 97 - C extension modules cannot be reloaded, and so cannot be autoreloaded.
98 98 """
99 99
100 skip_doctest = True
100 __skip_doctest__ = True
101 101
102 102 # -----------------------------------------------------------------------------
103 103 # Copyright (C) 2000 Thomas Heller
104 104 # Copyright (C) 2008 Pauli Virtanen <pav@iki.fi>
105 105 # Copyright (C) 2012 The IPython Development Team
106 106 #
107 107 # Distributed under the terms of the BSD License. The full license is in
108 108 # the file COPYING, distributed as part of this software.
109 109 # -----------------------------------------------------------------------------
110 110 #
111 111 # This IPython module is written by Pauli Virtanen, based on the autoreload
112 112 # code by Thomas Heller.
113 113
114 114 # -----------------------------------------------------------------------------
115 115 # Imports
116 116 # -----------------------------------------------------------------------------
117 117
118 118 import os
119 119 import sys
120 120 import traceback
121 121 import types
122 122 import weakref
123 123 import gc
124 124 from importlib import import_module
125 125 from importlib.util import source_from_cache
126 126 from imp import reload
127 127
128 128 # ------------------------------------------------------------------------------
129 129 # Autoreload functionality
130 130 # ------------------------------------------------------------------------------
131 131
132 132
133 133 class ModuleReloader:
134 134 enabled = False
135 135 """Whether this reloader is enabled"""
136 136
137 137 check_all = True
138 138 """Autoreload all modules, not just those listed in 'modules'"""
139 139
140 140 autoload_obj = False
141 141 """Autoreload all modules AND autoload all new objects"""
142 142
143 143 def __init__(self, shell=None):
144 144 # Modules that failed to reload: {module: mtime-on-failed-reload, ...}
145 145 self.failed = {}
146 146 # Modules specially marked as autoreloadable.
147 147 self.modules = {}
148 148 # Modules specially marked as not autoreloadable.
149 149 self.skip_modules = {}
150 150 # (module-name, name) -> weakref, for replacing old code objects
151 151 self.old_objects = {}
152 152 # Module modification timestamps
153 153 self.modules_mtimes = {}
154 154 self.shell = shell
155 155
156 156 # Cache module modification times
157 157 self.check(check_all=True, do_reload=False)
158 158
159 159 def mark_module_skipped(self, module_name):
160 160 """Skip reloading the named module in the future"""
161 161 try:
162 162 del self.modules[module_name]
163 163 except KeyError:
164 164 pass
165 165 self.skip_modules[module_name] = True
166 166
167 167 def mark_module_reloadable(self, module_name):
168 168 """Reload the named module in the future (if it is imported)"""
169 169 try:
170 170 del self.skip_modules[module_name]
171 171 except KeyError:
172 172 pass
173 173 self.modules[module_name] = True
174 174
175 175 def aimport_module(self, module_name):
176 176 """Import a module, and mark it reloadable
177 177
178 178 Returns
179 179 -------
180 180 top_module : module
181 181 The imported module if it is top-level, or the top-level
182 182 top_name : module
183 183 Name of top_module
184 184
185 185 """
186 186 self.mark_module_reloadable(module_name)
187 187
188 188 import_module(module_name)
189 189 top_name = module_name.split(".")[0]
190 190 top_module = sys.modules[top_name]
191 191 return top_module, top_name
192 192
193 193 def filename_and_mtime(self, module):
194 194 if not hasattr(module, "__file__") or module.__file__ is None:
195 195 return None, None
196 196
197 197 if getattr(module, "__name__", None) in [None, "__mp_main__", "__main__"]:
198 198 # we cannot reload(__main__) or reload(__mp_main__)
199 199 return None, None
200 200
201 201 filename = module.__file__
202 202 path, ext = os.path.splitext(filename)
203 203
204 204 if ext.lower() == ".py":
205 205 py_filename = filename
206 206 else:
207 207 try:
208 208 py_filename = source_from_cache(filename)
209 209 except ValueError:
210 210 return None, None
211 211
212 212 try:
213 213 pymtime = os.stat(py_filename).st_mtime
214 214 except OSError:
215 215 return None, None
216 216
217 217 return py_filename, pymtime
218 218
219 219 def check(self, check_all=False, do_reload=True):
220 220 """Check whether some modules need to be reloaded."""
221 221
222 222 if not self.enabled and not check_all:
223 223 return
224 224
225 225 if check_all or self.check_all:
226 226 modules = list(sys.modules.keys())
227 227 else:
228 228 modules = list(self.modules.keys())
229 229
230 230 for modname in modules:
231 231 m = sys.modules.get(modname, None)
232 232
233 233 if modname in self.skip_modules:
234 234 continue
235 235
236 236 py_filename, pymtime = self.filename_and_mtime(m)
237 237 if py_filename is None:
238 238 continue
239 239
240 240 try:
241 241 if pymtime <= self.modules_mtimes[modname]:
242 242 continue
243 243 except KeyError:
244 244 self.modules_mtimes[modname] = pymtime
245 245 continue
246 246 else:
247 247 if self.failed.get(py_filename, None) == pymtime:
248 248 continue
249 249
250 250 self.modules_mtimes[modname] = pymtime
251 251
252 252 # If we've reached this point, we should try to reload the module
253 253 if do_reload:
254 254 try:
255 255 if self.autoload_obj:
256 256 superreload(m, reload, self.old_objects, self.shell)
257 257 else:
258 258 superreload(m, reload, self.old_objects)
259 259 if py_filename in self.failed:
260 260 del self.failed[py_filename]
261 261 except:
262 262 print(
263 263 "[autoreload of {} failed: {}]".format(
264 264 modname, traceback.format_exc(10)
265 265 ),
266 266 file=sys.stderr,
267 267 )
268 268 self.failed[py_filename] = pymtime
269 269
270 270
271 271 # ------------------------------------------------------------------------------
272 272 # superreload
273 273 # ------------------------------------------------------------------------------
274 274
275 275
276 276 func_attrs = [
277 277 "__code__",
278 278 "__defaults__",
279 279 "__doc__",
280 280 "__closure__",
281 281 "__globals__",
282 282 "__dict__",
283 283 ]
284 284
285 285
286 286 def update_function(old, new):
287 287 """Upgrade the code object of a function"""
288 288 for name in func_attrs:
289 289 try:
290 290 setattr(old, name, getattr(new, name))
291 291 except (AttributeError, TypeError):
292 292 pass
293 293
294 294
295 295 def update_instances(old, new):
296 296 """Use garbage collector to find all instances that refer to the old
297 297 class definition and update their __class__ to point to the new class
298 298 definition"""
299 299
300 300 refs = gc.get_referrers(old)
301 301
302 302 for ref in refs:
303 303 if type(ref) is old:
304 304 ref.__class__ = new
305 305
306 306
307 307 def update_class(old, new):
308 308 """Replace stuff in the __dict__ of a class, and upgrade
309 309 method code objects, and add new methods, if any"""
310 310 for key in list(old.__dict__.keys()):
311 311 old_obj = getattr(old, key)
312 312 try:
313 313 new_obj = getattr(new, key)
314 314 # explicitly checking that comparison returns True to handle
315 315 # cases where `==` doesn't return a boolean.
316 316 if (old_obj == new_obj) is True:
317 317 continue
318 318 except AttributeError:
319 319 # obsolete attribute: remove it
320 320 try:
321 321 delattr(old, key)
322 322 except (AttributeError, TypeError):
323 323 pass
324 324 continue
325 325
326 326 if update_generic(old_obj, new_obj):
327 327 continue
328 328
329 329 try:
330 330 setattr(old, key, getattr(new, key))
331 331 except (AttributeError, TypeError):
332 332 pass # skip non-writable attributes
333 333
334 334 for key in list(new.__dict__.keys()):
335 335 if key not in list(old.__dict__.keys()):
336 336 try:
337 337 setattr(old, key, getattr(new, key))
338 338 except (AttributeError, TypeError):
339 339 pass # skip non-writable attributes
340 340
341 341 # update all instances of class
342 342 update_instances(old, new)
343 343
344 344
345 345 def update_property(old, new):
346 346 """Replace get/set/del functions of a property"""
347 347 update_generic(old.fdel, new.fdel)
348 348 update_generic(old.fget, new.fget)
349 349 update_generic(old.fset, new.fset)
350 350
351 351
352 352 def isinstance2(a, b, typ):
353 353 return isinstance(a, typ) and isinstance(b, typ)
354 354
355 355
356 356 UPDATE_RULES = [
357 357 (lambda a, b: isinstance2(a, b, type), update_class),
358 358 (lambda a, b: isinstance2(a, b, types.FunctionType), update_function),
359 359 (lambda a, b: isinstance2(a, b, property), update_property),
360 360 ]
361 361 UPDATE_RULES.extend(
362 362 [
363 363 (
364 364 lambda a, b: isinstance2(a, b, types.MethodType),
365 365 lambda a, b: update_function(a.__func__, b.__func__),
366 366 ),
367 367 ]
368 368 )
369 369
370 370
371 371 def update_generic(a, b):
372 372 for type_check, update in UPDATE_RULES:
373 373 if type_check(a, b):
374 374 update(a, b)
375 375 return True
376 376 return False
377 377
378 378
379 379 class StrongRef:
380 380 def __init__(self, obj):
381 381 self.obj = obj
382 382
383 383 def __call__(self):
384 384 return self.obj
385 385
386 386
387 387 mod_attrs = [
388 388 "__name__",
389 389 "__doc__",
390 390 "__package__",
391 391 "__loader__",
392 392 "__spec__",
393 393 "__file__",
394 394 "__cached__",
395 395 "__builtins__",
396 396 ]
397 397
398 398
399 399 def append_obj(module, d, name, obj, autoload=False):
400 400 in_module = hasattr(obj, "__module__") and obj.__module__ == module.__name__
401 401 if autoload:
402 402 # check needed for module global built-ins
403 403 if not in_module and name in mod_attrs:
404 404 return False
405 405 else:
406 406 if not in_module:
407 407 return False
408 408
409 409 key = (module.__name__, name)
410 410 try:
411 411 d.setdefault(key, []).append(weakref.ref(obj))
412 412 except TypeError:
413 413 pass
414 414 return True
415 415
416 416
417 417 def superreload(module, reload=reload, old_objects=None, shell=None):
418 418 """Enhanced version of the builtin reload function.
419 419
420 420 superreload remembers objects previously in the module, and
421 421
422 422 - upgrades the class dictionary of every old class in the module
423 423 - upgrades the code object of every old function and method
424 424 - clears the module's namespace before reloading
425 425
426 426 """
427 427 if old_objects is None:
428 428 old_objects = {}
429 429
430 430 # collect old objects in the module
431 431 for name, obj in list(module.__dict__.items()):
432 432 if not append_obj(module, old_objects, name, obj):
433 433 continue
434 434 key = (module.__name__, name)
435 435 try:
436 436 old_objects.setdefault(key, []).append(weakref.ref(obj))
437 437 except TypeError:
438 438 pass
439 439
440 440 # reload module
441 441 try:
442 442 # clear namespace first from old cruft
443 443 old_dict = module.__dict__.copy()
444 444 old_name = module.__name__
445 445 module.__dict__.clear()
446 446 module.__dict__["__name__"] = old_name
447 447 module.__dict__["__loader__"] = old_dict["__loader__"]
448 448 except (TypeError, AttributeError, KeyError):
449 449 pass
450 450
451 451 try:
452 452 module = reload(module)
453 453 except:
454 454 # restore module dictionary on failed reload
455 455 module.__dict__.update(old_dict)
456 456 raise
457 457
458 458 # iterate over all objects and update functions & classes
459 459 for name, new_obj in list(module.__dict__.items()):
460 460 key = (module.__name__, name)
461 461 if key not in old_objects:
462 462 # here 'shell' acts both as a flag and as an output var
463 463 if (
464 464 shell is None
465 465 or name == "Enum"
466 466 or not append_obj(module, old_objects, name, new_obj, True)
467 467 ):
468 468 continue
469 469 shell.user_ns[name] = new_obj
470 470
471 471 new_refs = []
472 472 for old_ref in old_objects[key]:
473 473 old_obj = old_ref()
474 474 if old_obj is None:
475 475 continue
476 476 new_refs.append(old_ref)
477 477 update_generic(old_obj, new_obj)
478 478
479 479 if new_refs:
480 480 old_objects[key] = new_refs
481 481 else:
482 482 del old_objects[key]
483 483
484 484 return module
485 485
486 486
487 487 # ------------------------------------------------------------------------------
488 488 # IPython connectivity
489 489 # ------------------------------------------------------------------------------
490 490
491 491 from IPython.core.magic import Magics, magics_class, line_magic
492 492
493 493
494 494 @magics_class
495 495 class AutoreloadMagics(Magics):
496 496 def __init__(self, *a, **kw):
497 497 super().__init__(*a, **kw)
498 498 self._reloader = ModuleReloader(self.shell)
499 499 self._reloader.check_all = False
500 500 self._reloader.autoload_obj = False
501 501 self.loaded_modules = set(sys.modules)
502 502
503 503 @line_magic
504 504 def autoreload(self, parameter_s=""):
505 505 r"""%autoreload => Reload modules automatically
506 506
507 507 %autoreload
508 508 Reload all modules (except those excluded by %aimport) automatically
509 509 now.
510 510
511 511 %autoreload 0
512 512 Disable automatic reloading.
513 513
514 514 %autoreload 1
515 515 Reload all modules imported with %aimport every time before executing
516 516 the Python code typed.
517 517
518 518 %autoreload 2
519 519 Reload all modules (except those excluded by %aimport) every time
520 520 before executing the Python code typed.
521 521
522 522 Reloading Python modules in a reliable way is in general
523 523 difficult, and unexpected things may occur. %autoreload tries to
524 524 work around common pitfalls by replacing function code objects and
525 525 parts of classes previously in the module with new versions. This
526 526 makes the following things to work:
527 527
528 528 - Functions and classes imported via 'from xxx import foo' are upgraded
529 529 to new versions when 'xxx' is reloaded.
530 530
531 531 - Methods and properties of classes are upgraded on reload, so that
532 532 calling 'c.foo()' on an object 'c' created before the reload causes
533 533 the new code for 'foo' to be executed.
534 534
535 535 Some of the known remaining caveats are:
536 536
537 537 - Replacing code objects does not always succeed: changing a @property
538 538 in a class to an ordinary method or a method to a member variable
539 539 can cause problems (but in old objects only).
540 540
541 541 - Functions that are removed (e.g. via monkey-patching) from a module
542 542 before it is reloaded are not upgraded.
543 543
544 544 - C extension modules cannot be reloaded, and so cannot be
545 545 autoreloaded.
546 546
547 547 """
548 548 if parameter_s == "":
549 549 self._reloader.check(True)
550 550 elif parameter_s == "0":
551 551 self._reloader.enabled = False
552 552 elif parameter_s == "1":
553 553 self._reloader.check_all = False
554 554 self._reloader.enabled = True
555 555 elif parameter_s == "2":
556 556 self._reloader.check_all = True
557 557 self._reloader.enabled = True
558 558 self._reloader.enabled = True
559 559 elif parameter_s == "3":
560 560 self._reloader.check_all = True
561 561 self._reloader.enabled = True
562 562 self._reloader.autoload_obj = True
563 563
564 564 @line_magic
565 565 def aimport(self, parameter_s="", stream=None):
566 566 """%aimport => Import modules for automatic reloading.
567 567
568 568 %aimport
569 569 List modules to automatically import and not to import.
570 570
571 571 %aimport foo
572 572 Import module 'foo' and mark it to be autoreloaded for %autoreload 1
573 573
574 574 %aimport foo, bar
575 575 Import modules 'foo', 'bar' and mark them to be autoreloaded for %autoreload 1
576 576
577 577 %aimport -foo
578 578 Mark module 'foo' to not be autoreloaded for %autoreload 1
579 579 """
580 580 modname = parameter_s
581 581 if not modname:
582 582 to_reload = sorted(self._reloader.modules.keys())
583 583 to_skip = sorted(self._reloader.skip_modules.keys())
584 584 if stream is None:
585 585 stream = sys.stdout
586 586 if self._reloader.check_all:
587 587 stream.write("Modules to reload:\nall-except-skipped\n")
588 588 else:
589 589 stream.write("Modules to reload:\n%s\n" % " ".join(to_reload))
590 590 stream.write("\nModules to skip:\n%s\n" % " ".join(to_skip))
591 591 elif modname.startswith("-"):
592 592 modname = modname[1:]
593 593 self._reloader.mark_module_skipped(modname)
594 594 else:
595 595 for _module in [_.strip() for _ in modname.split(",")]:
596 596 top_module, top_name = self._reloader.aimport_module(_module)
597 597
598 598 # Inject module to user namespace
599 599 self.shell.push({top_name: top_module})
600 600
601 601 def pre_run_cell(self):
602 602 if self._reloader.enabled:
603 603 try:
604 604 self._reloader.check()
605 605 except:
606 606 pass
607 607
608 608 def post_execute_hook(self):
609 609 """Cache the modification times of any modules imported in this execution"""
610 610 newly_loaded_modules = set(sys.modules) - self.loaded_modules
611 611 for modname in newly_loaded_modules:
612 612 _, pymtime = self._reloader.filename_and_mtime(sys.modules[modname])
613 613 if pymtime is not None:
614 614 self._reloader.modules_mtimes[modname] = pymtime
615 615
616 616 self.loaded_modules.update(newly_loaded_modules)
617 617
618 618
619 619 def load_ipython_extension(ip):
620 620 """Load the extension in IPython."""
621 621 auto_reload = AutoreloadMagics(ip)
622 622 ip.register_magics(auto_reload)
623 623 ip.events.register("pre_run_cell", auto_reload.pre_run_cell)
624 624 ip.events.register("post_execute", auto_reload.post_execute_hook)
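The essential trick that lets ``superreload`` keep old references alive is the attribute copy done by ``update_function``: the new function's ``__code__`` (and related attributes) are written onto the old function object, so anything still holding the old object runs the new code. A standalone sketch of that idea, with two illustrative functions that are not part of the module above:

.. code:: python

    def old_version(x):
        return x + 1

    def new_version(x):
        return x + 2

    kept_reference = old_version  # simulates `from foo import some_function`

    # What update_function() effectively does on reload:
    old_version.__code__ = new_version.__code__

    print(kept_reference(1))  # 3 -- the old reference now runs the new code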
@@ -1,762 +1,762 b''
1 1 """Nose Plugin that supports IPython doctests.
2 2
3 3 Limitations:
4 4
5 5 - When generating examples for use as doctests, make sure that you have
6 6 pretty-printing OFF. This can be done either by setting the
7 7 ``PlainTextFormatter.pprint`` option in your configuration file to False, or
8 8 by interactively disabling it with %Pprint. This is required so that IPython
9 9 output matches that of normal Python, which is used by doctest for internal
10 10 execution.
11 11
12 12 - Do not rely on specific prompt numbers for results (such as using
13 13 '_34==True', for example). For IPython tests run via an external process the
14 14 prompt numbers may be different, and IPython tests run as normal python code
15 15 won't even have these special _NN variables set at all.
16 16 """
17 17
18 18 #-----------------------------------------------------------------------------
19 19 # Module imports
20 20
21 21 # From the standard library
22 22 import builtins as builtin_mod
23 23 import doctest
24 24 import inspect
25 25 import logging
26 26 import os
27 27 import re
28 28 import sys
29 29 from importlib import import_module
30 30 from io import StringIO
31 31
32 32 from testpath import modified_env
33 33
34 34 from inspect import getmodule
35 35
36 36 from pathlib import Path, PurePath
37 37
38 38 # We are overriding the default doctest runner, so we need to import a few
39 39 # things from doctest directly
40 40 from doctest import (REPORTING_FLAGS, REPORT_ONLY_FIRST_FAILURE,
41 41 _unittest_reportflags, DocTestRunner,
42 42 _extract_future_flags, pdb, _OutputRedirectingPdb,
43 43 _exception_traceback,
44 44 linecache)
45 45
46 46 # Third-party modules
47 47
48 48 from nose.plugins import doctests, Plugin
49 49 from nose.util import anyp, tolist
50 50
51 51 #-----------------------------------------------------------------------------
52 52 # Module globals and other constants
53 53 #-----------------------------------------------------------------------------
54 54
55 55 log = logging.getLogger(__name__)
56 56
57 57
58 58 #-----------------------------------------------------------------------------
59 59 # Classes and functions
60 60 #-----------------------------------------------------------------------------
61 61
62 62 def is_extension_module(filename):
63 63 """Return whether the given filename is an extension module.
64 64
65 65 This simply checks that the extension is either .so or .pyd.
66 66 """
67 67 return os.path.splitext(filename)[1].lower() in ('.so','.pyd')
68 68
69 69
70 70 class DocTestSkip(object):
71 71 """Object wrapper for doctests to be skipped."""
72 72
73 73 ds_skip = """Doctest to skip.
74 74 >>> 1 #doctest: +SKIP
75 75 """
76 76
77 77 def __init__(self,obj):
78 78 self.obj = obj
79 79
80 80 def __getattribute__(self,key):
81 81 if key == '__doc__':
82 82 return DocTestSkip.ds_skip
83 83 else:
84 84 return getattr(object.__getattribute__(self,'obj'),key)
85 85
86 86 # Modified version of the one in the stdlib, that fixes a python bug (doctests
87 87 # not found in extension modules, http://bugs.python.org/issue3158)
88 88 class DocTestFinder(doctest.DocTestFinder):
89 89
90 90 def _from_module(self, module, object):
91 91 """
92 92 Return true if the given object is defined in the given
93 93 module.
94 94 """
95 95 if module is None:
96 96 return True
97 97 elif inspect.isfunction(object):
98 98 return module.__dict__ is object.__globals__
99 99 elif inspect.isbuiltin(object):
100 100 return module.__name__ == object.__module__
101 101 elif inspect.isclass(object):
102 102 return module.__name__ == object.__module__
103 103 elif inspect.ismethod(object):
104 104 # This one may be a bug in cython that fails to correctly set the
105 105 # __module__ attribute of methods, but since the same error is easy
106 106 # to make by extension code writers, having this safety in place
107 107 # isn't such a bad idea
108 108 return module.__name__ == object.__self__.__class__.__module__
109 109 elif inspect.getmodule(object) is not None:
110 110 return module is inspect.getmodule(object)
111 111 elif hasattr(object, '__module__'):
112 112 return module.__name__ == object.__module__
113 113 elif isinstance(object, property):
114 114 return True # [XX] no way not be sure.
115 115 elif inspect.ismethoddescriptor(object):
116 116 # Unbound PyQt signals reach this point in Python 3.4b3, and we want
117 117 # to avoid throwing an error. See also http://bugs.python.org/issue3158
118 118 return False
119 119 else:
120 120 raise ValueError("object must be a class or function, got %r" % object)
121 121
122 122 def _find(self, tests, obj, name, module, source_lines, globs, seen):
123 123 """
124 124 Find tests for the given object and any contained objects, and
125 125 add them to `tests`.
126 126 """
127 127 print('_find for:', obj, name, module) # dbg
128 if hasattr(obj,"skip_doctest"):
128 if bool(getattr(obj, "__skip_doctest__", False)):
129 129 #print 'SKIPPING DOCTEST FOR:',obj # dbg
130 130 obj = DocTestSkip(obj)
131 131
132 132 doctest.DocTestFinder._find(self,tests, obj, name, module,
133 133 source_lines, globs, seen)
134 134
135 135 # Below we re-run pieces of the above method with manual modifications,
136 136 # because the original code is buggy and fails to correctly identify
137 137 # doctests in extension modules.
138 138
139 139 # Local shorthands
140 140 from inspect import isroutine, isclass
141 141
142 142 # Look for tests in a module's contained objects.
143 143 if inspect.ismodule(obj) and self._recurse:
144 144 for valname, val in obj.__dict__.items():
145 145 valname1 = '%s.%s' % (name, valname)
146 146 if ( (isroutine(val) or isclass(val))
147 147 and self._from_module(module, val) ):
148 148
149 149 self._find(tests, val, valname1, module, source_lines,
150 150 globs, seen)
151 151
152 152 # Look for tests in a class's contained objects.
153 153 if inspect.isclass(obj) and self._recurse:
154 154 #print 'RECURSE into class:',obj # dbg
155 155 for valname, val in obj.__dict__.items():
156 156 # Special handling for staticmethod/classmethod.
157 157 if isinstance(val, staticmethod):
158 158 val = getattr(obj, valname)
159 159 if isinstance(val, classmethod):
160 160 val = getattr(obj, valname).__func__
161 161
162 162 # Recurse to methods, properties, and nested classes.
163 163 if ((inspect.isfunction(val) or inspect.isclass(val) or
164 164 inspect.ismethod(val) or
165 165 isinstance(val, property)) and
166 166 self._from_module(module, val)):
167 167 valname = '%s.%s' % (name, valname)
168 168 self._find(tests, val, valname, module, source_lines,
169 169 globs, seen)
170 170
171 171
172 172 class IPDoctestOutputChecker(doctest.OutputChecker):
173 173 """Second-chance checker with support for random tests.
174 174
175 175 If the default comparison doesn't pass, this checker looks in the expected
176 176 output string for flags that tell us to ignore the output.
177 177 """
178 178
179 179 random_re = re.compile(r'#\s*random\s+')
180 180
181 181 def check_output(self, want, got, optionflags):
182 182 """Check output, accepting special markers embedded in the output.
183 183
184 184 If the output didn't pass the default validation but the special string
185 185 '#random' is included, we accept it."""
186 186
187 187 # Let the original tester verify first, in case people have valid tests
188 188 # that happen to have a comment saying '#random' embedded in.
189 189 ret = doctest.OutputChecker.check_output(self, want, got,
190 190 optionflags)
191 191 if not ret and self.random_re.search(want):
192 192 #print >> sys.stderr, 'RANDOM OK:',want # dbg
193 193 return True
194 194
195 195 return ret
196 196
197 197
198 198 class DocTestCase(doctests.DocTestCase):
199 199 """Proxy for DocTestCase: provides an address() method that
200 200 returns the correct address for the doctest case. Otherwise
201 201 acts as a proxy to the test case. To provide hints for address(),
202 202 an obj may also be passed -- this will be used as the test object
203 203 for purposes of determining the test address, if it is provided.
204 204 """
205 205
206 206 # Note: this method was taken from numpy's nosetester module.
207 207
208 208 # Subclass nose.plugins.doctests.DocTestCase to work around a bug in
209 209 # its constructor that blocks non-default arguments from being passed
210 210 # down into doctest.DocTestCase
211 211
212 212 def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
213 213 checker=None, obj=None, result_var='_'):
214 214 self._result_var = result_var
215 215 doctests.DocTestCase.__init__(self, test,
216 216 optionflags=optionflags,
217 217 setUp=setUp, tearDown=tearDown,
218 218 checker=checker)
219 219 # Now we must actually copy the original constructor from the stdlib
220 220 # doctest class, because we can't call it directly and a bug in nose
221 221 # means it never gets passed the right arguments.
222 222
223 223 self._dt_optionflags = optionflags
224 224 self._dt_checker = checker
225 225 self._dt_test = test
226 226 self._dt_test_globs_ori = test.globs
227 227 self._dt_setUp = setUp
228 228 self._dt_tearDown = tearDown
229 229
230 230 # XXX - store this runner once in the object!
231 231 runner = IPDocTestRunner(optionflags=optionflags,
232 232 checker=checker, verbose=False)
233 233 self._dt_runner = runner
234 234
235 235
236 236 # Each doctest should remember the directory it was loaded from, so
237 237 # things like %run work without too many contortions
238 238 self._ori_dir = os.path.dirname(test.filename)
239 239
240 240 # Modified runTest from the default stdlib
241 241 def runTest(self):
242 242 test = self._dt_test
243 243 runner = self._dt_runner
244 244
245 245 old = sys.stdout
246 246 new = StringIO()
247 247 optionflags = self._dt_optionflags
248 248
249 249 if not (optionflags & REPORTING_FLAGS):
250 250 # The option flags don't include any reporting flags,
251 251 # so add the default reporting flags
252 252 optionflags |= _unittest_reportflags
253 253
254 254 try:
255 255 # Save our current directory and switch out to the one where the
256 256 # test was originally created, in case another doctest did a
257 257 # directory change. We'll restore this in the finally clause.
258 258 curdir = os.getcwd()
259 259 #print 'runTest in dir:', self._ori_dir # dbg
260 260 os.chdir(self._ori_dir)
261 261
262 262 runner.DIVIDER = "-"*70
263 263 failures, tries = runner.run(test,out=new.write,
264 264 clear_globs=False)
265 265 finally:
266 266 sys.stdout = old
267 267 os.chdir(curdir)
268 268
269 269 if failures:
270 270 raise self.failureException(self.format_failure(new.getvalue()))
271 271
272 272 def setUp(self):
273 273 """Modified test setup that syncs with ipython namespace"""
274 274 #print "setUp test", self._dt_test.examples # dbg
275 275 if isinstance(self._dt_test.examples[0], IPExample):
276 276 # for IPython examples *only*, we swap the globals with the ipython
277 277 # namespace, after updating it with the globals (which doctest
278 278 # fills with the necessary info from the module being tested).
279 279 self.user_ns_orig = {}
280 280 self.user_ns_orig.update(_ip.user_ns)
281 281 _ip.user_ns.update(self._dt_test.globs)
282 282 # We must remove the _ key in the namespace, so that Python's
283 283 # doctest code sets it naturally
284 284 _ip.user_ns.pop('_', None)
285 285 _ip.user_ns['__builtins__'] = builtin_mod
286 286 self._dt_test.globs = _ip.user_ns
287 287
288 288 super(DocTestCase, self).setUp()
289 289
290 290 def tearDown(self):
291 291
292 292 # Undo the test.globs reassignment we made, so that the parent class
293 293 # teardown doesn't destroy the ipython namespace
294 294 if isinstance(self._dt_test.examples[0], IPExample):
295 295 self._dt_test.globs = self._dt_test_globs_ori
296 296 _ip.user_ns.clear()
297 297 _ip.user_ns.update(self.user_ns_orig)
298 298
299 299 # XXX - fperez: I am not sure if this is truly a bug in nose 0.11, but
300 300 # it does look like one to me: its tearDown method tries to run
301 301 #
302 302 # delattr(builtin_mod, self._result_var)
303 303 #
304 304 # without checking that the attribute really is there; it implicitly
305 305 # assumes it should have been set via displayhook. But if the
306 306 # displayhook was never called, this doesn't necessarily happen. I
307 307 # haven't been able to find a little self-contained example outside of
308 308 # ipython that would show the problem so I can report it to the nose
309 309 # team, but it does happen a lot in our code.
310 310 #
311 311 # So here, we just protect as narrowly as possible by trapping an
312 312 # attribute error whose message would be the name of self._result_var,
313 313 # and letting any other error propagate.
314 314 try:
315 315 super(DocTestCase, self).tearDown()
316 316 except AttributeError as exc:
317 317 if exc.args[0] != self._result_var:
318 318 raise
319 319
320 320
321 321 # A simple subclassing of the original with a different class name, so we can
322 322 # distinguish and treat differently IPython examples from pure python ones.
323 323 class IPExample(doctest.Example): pass
324 324
325 325
326 326 class IPExternalExample(doctest.Example):
327 327 """Doctest examples to be run in an external process."""
328 328
329 329 def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
330 330 options=None):
331 331 # Parent constructor
332 332 doctest.Example.__init__(self,source,want,exc_msg,lineno,indent,options)
333 333
334 334 # An EXTRA newline is needed to prevent pexpect hangs
335 335 self.source += '\n'
336 336
337 337
338 338 class IPDocTestParser(doctest.DocTestParser):
339 339 """
340 340 A class used to parse strings containing doctest examples.
341 341
342 342 Note: This is a version modified to properly recognize IPython input and
343 343 convert any IPython examples into valid Python ones.
344 344 """
345 345 # This regular expression is used to find doctest examples in a
346 346 # string. It defines three groups: `source` is the source code
347 347 # (including leading indentation and prompts); `indent` is the
348 348 # indentation of the first (PS1) line of the source code; and
349 349 # `want` is the expected output (including leading indentation).
350 350
351 351 # Classic Python prompts or default IPython ones
352 352 _PS1_PY = r'>>>'
353 353 _PS2_PY = r'\.\.\.'
354 354
355 355 _PS1_IP = r'In\ \[\d+\]:'
356 356 _PS2_IP = r'\ \ \ \.\.\.+:'
357 357
358 358 _RE_TPL = r'''
359 359 # Source consists of a PS1 line followed by zero or more PS2 lines.
360 360 (?P<source>
361 361 (?:^(?P<indent> [ ]*) (?P<ps1> %s) .*) # PS1 line
362 362 (?:\n [ ]* (?P<ps2> %s) .*)*) # PS2 lines
363 363 \n? # a newline
364 364 # Want consists of any non-blank lines that do not start with PS1.
365 365 (?P<want> (?:(?![ ]*$) # Not a blank line
366 366 (?![ ]*%s) # Not a line starting with PS1
367 367 (?![ ]*%s) # Not a line starting with PS2
368 368 .*$\n? # But any other line
369 369 )*)
370 370 '''
371 371
372 372 _EXAMPLE_RE_PY = re.compile( _RE_TPL % (_PS1_PY,_PS2_PY,_PS1_PY,_PS2_PY),
373 373 re.MULTILINE | re.VERBOSE)
374 374
375 375 _EXAMPLE_RE_IP = re.compile( _RE_TPL % (_PS1_IP,_PS2_IP,_PS1_IP,_PS2_IP),
376 376 re.MULTILINE | re.VERBOSE)
377 377
378 378 # Mark a test as being fully random. In this case, we simply append the
379 379 # random marker ('#random') to each individual example's output. This way
380 380 # we don't need to modify any other code.
381 381 _RANDOM_TEST = re.compile(r'#\s*all-random\s+')
382 382
383 383 # Mark tests to be executed in an external process - currently unsupported.
384 384 _EXTERNAL_IP = re.compile(r'#\s*ipdoctest:\s*EXTERNAL')
385 385
386 386 def ip2py(self,source):
387 387 """Convert input IPython source into valid Python."""
388 388 block = _ip.input_transformer_manager.transform_cell(source)
389 389 if len(block.splitlines()) == 1:
390 390 return _ip.prefilter(block)
391 391 else:
392 392 return block
393 393
394 394 def parse(self, string, name='<string>'):
395 395 """
396 396 Divide the given string into examples and intervening text,
397 397 and return them as a list of alternating Examples and strings.
398 398 Line numbers for the Examples are 0-based. The optional
399 399 argument `name` is a name identifying this string, and is only
400 400 used for error messages.
401 401 """
402 402
403 403 #print 'Parse string:\n',string # dbg
404 404
405 405 string = string.expandtabs()
406 406 # If all lines begin with the same indentation, then strip it.
407 407 min_indent = self._min_indent(string)
408 408 if min_indent > 0:
409 409 string = '\n'.join([l[min_indent:] for l in string.split('\n')])
410 410
411 411 output = []
412 412 charno, lineno = 0, 0
413 413
414 414 # We make 'all random' tests by adding the '# random' mark to every
415 415 # block of output in the test.
416 416 if self._RANDOM_TEST.search(string):
417 417 random_marker = '\n# random'
418 418 else:
419 419 random_marker = ''
420 420
421 421 # Whether to convert the input from ipython to python syntax
422 422 ip2py = False
423 423 # Find all doctest examples in the string. First, try them as Python
424 424 # examples, then as IPython ones
425 425 terms = list(self._EXAMPLE_RE_PY.finditer(string))
426 426 if terms:
427 427 # Normal Python example
428 428 #print '-'*70 # dbg
429 429 #print 'PyExample, Source:\n',string # dbg
430 430 #print '-'*70 # dbg
431 431 Example = doctest.Example
432 432 else:
433 433 # It's an ipython example. Note that IPExamples are run
434 434 # in-process, so their syntax must be turned into valid python.
435 435 # IPExternalExamples are run out-of-process (via pexpect) so they
436 436 # don't need any filtering (a real ipython will be executing them).
437 437 terms = list(self._EXAMPLE_RE_IP.finditer(string))
438 438 if self._EXTERNAL_IP.search(string):
439 439 #print '-'*70 # dbg
440 440 #print 'IPExternalExample, Source:\n',string # dbg
441 441 #print '-'*70 # dbg
442 442 Example = IPExternalExample
443 443 else:
444 444 #print '-'*70 # dbg
445 445 #print 'IPExample, Source:\n',string # dbg
446 446 #print '-'*70 # dbg
447 447 Example = IPExample
448 448 ip2py = True
449 449
450 450 for m in terms:
451 451 # Add the pre-example text to `output`.
452 452 output.append(string[charno:m.start()])
453 453 # Update lineno (lines before this example)
454 454 lineno += string.count('\n', charno, m.start())
455 455 # Extract info from the regexp match.
456 456 (source, options, want, exc_msg) = \
457 457 self._parse_example(m, name, lineno,ip2py)
458 458
459 459 # Append the random-output marker (it defaults to empty in most
460 460 # cases, it's only non-empty for 'all-random' tests):
461 461 want += random_marker
462 462
463 463 if Example is IPExternalExample:
464 464 options[doctest.NORMALIZE_WHITESPACE] = True
465 465 want += '\n'
466 466
467 467 # Create an Example, and add it to the list.
468 468 if not self._IS_BLANK_OR_COMMENT(source):
469 469 output.append(Example(source, want, exc_msg,
470 470 lineno=lineno,
471 471 indent=min_indent+len(m.group('indent')),
472 472 options=options))
473 473 # Update lineno (lines inside this example)
474 474 lineno += string.count('\n', m.start(), m.end())
475 475 # Update charno.
476 476 charno = m.end()
477 477 # Add any remaining post-example text to `output`.
478 478 output.append(string[charno:])
479 479 return output
480 480
481 481 def _parse_example(self, m, name, lineno,ip2py=False):
482 482 """
483 483 Given a regular expression match from `_EXAMPLE_RE` (`m`),
484 484 return a pair `(source, want)`, where `source` is the matched
485 485 example's source code (with prompts and indentation stripped);
486 486 and `want` is the example's expected output (with indentation
487 487 stripped).
488 488
489 489 `name` is the string's name, and `lineno` is the line number
490 490 where the example starts; both are used for error messages.
491 491
492 492 Optional:
493 493 `ip2py`: if true, filter the input via IPython to convert the syntax
494 494 into valid python.
495 495 """
496 496
497 497 # Get the example's indentation level.
498 498 indent = len(m.group('indent'))
499 499
500 500 # Divide source into lines; check that they're properly
501 501 # indented; and then strip their indentation & prompts.
502 502 source_lines = m.group('source').split('\n')
503 503
504 504 # We're using variable-length input prompts
505 505 ps1 = m.group('ps1')
506 506 ps2 = m.group('ps2')
507 507 ps1_len = len(ps1)
508 508
509 509 self._check_prompt_blank(source_lines, indent, name, lineno,ps1_len)
510 510 if ps2:
511 511 self._check_prefix(source_lines[1:], ' '*indent + ps2, name, lineno)
512 512
513 513 source = '\n'.join([sl[indent+ps1_len+1:] for sl in source_lines])
514 514
515 515 if ip2py:
516 516 # Convert source input from IPython into valid Python syntax
517 517 source = self.ip2py(source)
518 518
519 519 # Divide want into lines; check that it's properly indented; and
520 520 # then strip the indentation. Spaces before the last newline should
521 521 # be preserved, so plain rstrip() isn't good enough.
522 522 want = m.group('want')
523 523 want_lines = want.split('\n')
524 524 if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
525 525 del want_lines[-1] # forget final newline & spaces after it
526 526 self._check_prefix(want_lines, ' '*indent, name,
527 527 lineno + len(source_lines))
528 528
529 529 # Remove ipython output prompt that might be present in the first line
530 530 want_lines[0] = re.sub(r'Out\[\d+\]: \s*?\n?','',want_lines[0])
531 531
532 532 want = '\n'.join([wl[indent:] for wl in want_lines])
533 533
534 534 # If `want` contains a traceback message, then extract it.
535 535 m = self._EXCEPTION_RE.match(want)
536 536 if m:
537 537 exc_msg = m.group('msg')
538 538 else:
539 539 exc_msg = None
540 540
541 541 # Extract options from the source.
542 542 options = self._find_options(source, name, lineno)
543 543
544 544 return source, options, want, exc_msg
545 545
546 546 def _check_prompt_blank(self, lines, indent, name, lineno, ps1_len):
547 547 """
548 548 Given the lines of a source string (including prompts and
549 549 leading indentation), check to make sure that every prompt is
550 550 followed by a space character. If any line is not followed by
551 551 a space character, then raise ValueError.
552 552
553 553 Note: IPython-modified version which takes the input prompt length as a
554 554 parameter, so that prompts of variable length can be dealt with.
555 555 """
556 556 space_idx = indent+ps1_len
557 557 min_len = space_idx+1
558 558 for i, line in enumerate(lines):
559 559 if len(line) >= min_len and line[space_idx] != ' ':
560 560 raise ValueError('line %r of the docstring for %s '
561 561 'lacks blank after %s: %r' %
562 562 (lineno+i+1, name,
563 563 line[indent:space_idx], line))
564 564
565 565
566 566 SKIP = doctest.register_optionflag('SKIP')
567 567
568 568
569 569 class IPDocTestRunner(doctest.DocTestRunner,object):
570 570 """Test runner that synchronizes the IPython namespace with test globals.
571 571 """
572 572
573 573 def run(self, test, compileflags=None, out=None, clear_globs=True):
574 574
575 575 # Hack: ipython needs access to the execution context of the example,
576 576 # so that it can propagate user variables loaded by %run into
577 577 # test.globs. We put them here into our modified %run as a function
578 578 # attribute. Our new %run will then only make the namespace update
579 579 # when called (rather than unconditionally updating test.globs here
580 580 # for all examples, most of which won't be calling %run anyway).
581 581 #_ip._ipdoctest_test_globs = test.globs
582 582 #_ip._ipdoctest_test_filename = test.filename
583 583
584 584 test.globs.update(_ip.user_ns)
585 585
586 586 # Override terminal size to standardise traceback format
587 587 with modified_env({'COLUMNS': '80', 'LINES': '24'}):
588 588 return super(IPDocTestRunner,self).run(test,
589 589 compileflags,out,clear_globs)
590 590
591 591
592 592 class DocFileCase(doctest.DocFileCase):
593 593 """Overrides to provide filename
594 594 """
595 595 def address(self):
596 596 return (self._dt_test.filename, None, None)
597 597
598 598
599 599 class ExtensionDoctest(doctests.Doctest):
600 600 """Nose Plugin that supports doctests in extension modules.
601 601 """
602 602 name = 'extdoctest' # call nosetests with --with-extdoctest
603 603 enabled = True
604 604
605 605 def options(self, parser, env=os.environ):
606 606 Plugin.options(self, parser, env)
607 607 parser.add_option('--doctest-tests', action='store_true',
608 608 dest='doctest_tests',
609 609 default=env.get('NOSE_DOCTEST_TESTS',True),
610 610 help="Also look for doctests in test modules. "
611 611 "Note that classes, methods and functions should "
612 612 "have either doctests or non-doctest tests, "
613 613 "not both. [NOSE_DOCTEST_TESTS]")
614 614 parser.add_option('--doctest-extension', action="append",
615 615 dest="doctestExtension",
616 616 help="Also look for doctests in files with "
617 617 "this extension [NOSE_DOCTEST_EXTENSION]")
618 618 # Set the default as a list, if given in env; otherwise
619 619 # an additional value set on the command line will cause
620 620 # an error.
621 621 env_setting = env.get('NOSE_DOCTEST_EXTENSION')
622 622 if env_setting is not None:
623 623 parser.set_defaults(doctestExtension=tolist(env_setting))
624 624
625 625
626 626 def configure(self, options, config):
627 627 Plugin.configure(self, options, config)
628 628 # Pull standard doctest plugin out of config; we will do doctesting
629 629 config.plugins.plugins = [p for p in config.plugins.plugins
630 630 if p.name != 'doctest']
631 631 self.doctest_tests = options.doctest_tests
632 632 self.extension = tolist(options.doctestExtension)
633 633
634 634 self.parser = doctest.DocTestParser()
635 635 self.finder = DocTestFinder()
636 636 self.checker = IPDoctestOutputChecker()
637 637 self.globs = None
638 638 self.extraglobs = None
639 639
640 640
641 641 def loadTestsFromExtensionModule(self,filename):
642 642 bpath,mod = os.path.split(filename)
643 643 modname = os.path.splitext(mod)[0]
644 644 try:
645 645 sys.path.append(bpath)
646 646 module = import_module(modname)
647 647 tests = list(self.loadTestsFromModule(module))
648 648 finally:
649 649 sys.path.pop()
650 650 return tests
651 651
652 652 # NOTE: the method below is almost a copy of the original one in nose, with
653 653 # a few modifications to control output checking.
654 654
655 655 def loadTestsFromModule(self, module):
656 656 #print '*** ipdoctest - lTM',module # dbg
657 657
658 658 if not self.matches(module.__name__):
659 659 log.debug("Doctest doesn't want module %s", module)
660 660 return
661 661
662 662 tests = self.finder.find(module,globs=self.globs,
663 663 extraglobs=self.extraglobs)
664 664 if not tests:
665 665 return
666 666
667 667 # always use whitespace and ellipsis options
668 668 optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
669 669
670 670 tests.sort()
671 671 module_file = module.__file__
672 672 if module_file[-4:] in ('.pyc', '.pyo'):
673 673 module_file = module_file[:-1]
674 674 for test in tests:
675 675 if not test.examples:
676 676 continue
677 677 if not test.filename:
678 678 test.filename = module_file
679 679
680 680 yield DocTestCase(test,
681 681 optionflags=optionflags,
682 682 checker=self.checker)
683 683
684 684
685 685 def loadTestsFromFile(self, filename):
686 686 #print "ipdoctest - from file", filename # dbg
687 687 if is_extension_module(filename):
688 688 for t in self.loadTestsFromExtensionModule(filename):
689 689 yield t
690 690 else:
691 691 if self.extension and anyp(filename.endswith, self.extension):
692 692 name = PurePath(filename).name
693 693 doc = Path(filename).read_text()
694 694 test = self.parser.get_doctest(
695 695 doc, globs={'__file__': filename}, name=name,
696 696 filename=filename, lineno=0)
697 697 if test.examples:
698 698 #print 'FileCase:',test.examples # dbg
699 699 yield DocFileCase(test)
700 700 else:
701 701 yield False # no tests to load
702 702
703 703
704 704 class IPythonDoctest(ExtensionDoctest):
705 705 """Nose Plugin that supports IPython doctests in extension modules.
706 706 """
707 707 name = 'ipdoctest' # call nosetests with --with-ipdoctest
708 708 enabled = True
709 709
710 710 def makeTest(self, obj, parent):
711 711 """Look for doctests in the given object, which will be a
712 712 function, method or class.
713 713 """
714 714 #print 'Plugin analyzing:', obj, parent # dbg
715 715 # always use whitespace and ellipsis options
716 716 optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
717 717
718 718 doctests = self.finder.find(obj, module=getmodule(parent))
719 719 if doctests:
720 720 for test in doctests:
721 721 if len(test.examples) == 0:
722 722 continue
723 723
724 724 yield DocTestCase(test, obj=obj,
725 725 optionflags=optionflags,
726 726 checker=self.checker)
727 727
728 728 def options(self, parser, env=os.environ):
729 729 #print "Options for nose plugin:", self.name # dbg
730 730 Plugin.options(self, parser, env)
731 731 parser.add_option('--ipdoctest-tests', action='store_true',
732 732 dest='ipdoctest_tests',
733 733 default=env.get('NOSE_IPDOCTEST_TESTS',True),
734 734 help="Also look for doctests in test modules. "
735 735 "Note that classes, methods and functions should "
736 736 "have either doctests or non-doctest tests, "
737 737 "not both. [NOSE_IPDOCTEST_TESTS]")
738 738 parser.add_option('--ipdoctest-extension', action="append",
739 739 dest="ipdoctest_extension",
740 740 help="Also look for doctests in files with "
741 741 "this extension [NOSE_IPDOCTEST_EXTENSION]")
742 742 # Set the default as a list, if given in env; otherwise
743 743 # an additional value set on the command line will cause
744 744 # an error.
745 745 env_setting = env.get('NOSE_IPDOCTEST_EXTENSION')
746 746 if env_setting is not None:
747 747 parser.set_defaults(ipdoctest_extension=tolist(env_setting))
748 748
749 749 def configure(self, options, config):
750 750 #print "Configuring nose plugin:", self.name # dbg
751 751 Plugin.configure(self, options, config)
752 752 # Pull standard doctest plugin out of config; we will do doctesting
753 753 config.plugins.plugins = [p for p in config.plugins.plugins
754 754 if p.name != 'doctest']
755 755 self.doctest_tests = options.ipdoctest_tests
756 756 self.extension = tolist(options.ipdoctest_extension)
757 757
758 758 self.parser = IPDocTestParser()
759 759 self.finder = DocTestFinder(parser=self.parser)
760 760 self.checker = IPDoctestOutputChecker()
761 761 self.globs = None
762 762 self.extraglobs = None
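The switch above in ``DocTestFinder._find`` from ``hasattr(obj, "skip_doctest")`` to ``bool(getattr(obj, "__skip_doctest__", False))`` matters because any module that merely imported the ``skip_doctest`` decorator ended up with a module-level attribute named ``skip_doctest``, so the old check was true for the whole module and its doctests were skipped. A small standalone sketch of the difference (the fake module here is illustrative only):

.. code:: python

    import types

    fake_module = types.ModuleType("fake")
    fake_module.skip_doctest = lambda f: f  # merely importing the decorator

    old_check = hasattr(fake_module, "skip_doctest")                    # True  -> module skipped
    new_check = bool(getattr(fake_module, "__skip_doctest__", False))   # False -> doctests kept

    print(old_check, new_check)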
@@ -1,19 +1,19 b''
1 1 """Decorators marks that a doctest should be skipped.
2 2
3 3 The IPython.testing.decorators module triggers various extra imports, including
4 4 numpy and sympy if they're present. Since this decorator is used in core parts
5 5 of IPython, it's in a separate module so that running IPython doesn't trigger
6 6 those imports."""
7 7
8 8 # Copyright (C) IPython Development Team
9 9 # Distributed under the terms of the Modified BSD License.
10 10
11 11
12 12 def skip_doctest(f):
13 13 """Decorator - mark a function or method for skipping its doctest.
14 14
15 15 This decorator allows you to mark a function whose docstring you wish to
16 16 omit from testing, while preserving the docstring for introspection, help,
17 17 etc."""
18 f.skip_doctest = True
18 f.__skip_doctest__ = True
19 19 return f
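A short usage sketch; the decorated function and the import path ``IPython.testing.skipdoctest`` are assumptions for illustration:

.. code:: python

    from IPython.testing.skipdoctest import skip_doctest  # assumed import path

    @skip_doctest
    def fragile_example():
        """Docstring kept for help(), but its examples are not collected.

        >>> fragile_example()  # would only work inside a full IPython session
        """
        return 42

    assert fragile_example.__skip_doctest__ is True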