Typos found by codespell
Dimitri Papadopoulos -


@@ -1,2239 +1,2239 b''
1 1 """Completion for IPython.
2 2
3 3 This module started as a fork of the rlcompleter module in the Python standard
4 4 library. The original enhancements made to rlcompleter have been sent
5 5 upstream and were accepted as of Python 2.3.
6 6
7 7 This module now supports a wide variety of completion mechanisms, both for
8 8 normal classic Python code and as a completer for IPython-specific
9 9 syntax like magics.
10 10
11 11 Latex and Unicode completion
12 12 ============================
13 13
14 14 IPython and compatible frontends can not only complete your code, but can also
15 15 help you input a wide range of characters. In particular, we allow you to insert
16 16 a unicode character using the tab completion mechanism.
17 17
18 18 Forward latex/unicode completion
19 19 --------------------------------
20 20
21 21 Forward completion allows you to easily type a unicode character using its latex
22 22 name, or unicode long description. To do so, type a backslash followed by the
23 23 relevant name and press tab:
24 24
25 25
26 26 Using latex completion:
27 27
28 28 .. code::
29 29
30 30 \\alpha<tab>
31 31 α
32 32
33 33 or using unicode completion:
34 34
35 35
36 36 .. code::
37 37
38 38 \\GREEK SMALL LETTER ALPHA<tab>
39 39 α
40 40
41 41
42 42 Only valid Python identifiers will complete. Combining characters (like arrows or
43 43 dots) are also available; unlike latex, they need to be put after their
44 44 counterpart, that is to say, `F\\\\vec<tab>` is correct, not `\\\\vec<tab>F`.
45 45
46 46 Some browsers are known to display combining characters incorrectly.
47 47
48 48 Backward latex completion
49 49 -------------------------
50 50
51 51 It is sometimes challenging to know how to type a character. If you are using
52 52 IPython, or any compatible frontend, you can prepend a backslash to the character
53 53 and press `<tab>` to expand it to its latex form.
54 54
55 55 .. code::
56 56
57 57 \\α<tab>
58 58 \\alpha
59 59
60 60
61 61 Both forward and backward completions can be deactivated by setting the
62 62 ``Completer.backslash_combining_completions`` option to ``False``.
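For example, this option can be set from an IPython configuration file; a minimal
sketch (``c`` being the configuration object exposed by ``ipython_config.py``):

.. code::

    c.Completer.backslash_combining_completions = False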
63 63
64 64
65 65 Experimental
66 66 ============
67 67
68 68 Starting with IPython 6.0, this module can make use of the Jedi library to
69 69 generate completions both using static analysis of the code, and dynamically
70 70 inspecting multiple namespaces. Jedi is an autocompletion and static analysis
71 71 library for Python. The APIs attached to this new mechanism are unstable and will
72 72 raise unless used in a :any:`provisionalcompleter` context manager.
73 73
74 74 You will find that the following are experimental:
75 75
76 76 - :any:`provisionalcompleter`
77 77 - :any:`IPCompleter.completions`
78 78 - :any:`Completion`
79 79 - :any:`rectify_completions`
80 80
81 81 .. note::
82 82
83 83 better name for :any:`rectify_completions` ?
84 84
85 85 We welcome any feedback on these new APIs, and we also encourage you to try this
86 86 module in debug mode (start IPython with ``--Completer.debug=True``) in order
87 87 to have extra logging information if :any:`jedi` is crashing, or if current
88 88 IPython completer pending deprecations are returning results not yet handled
89 89 by :any:`jedi`.
90 90
91 91 Using Jedi for tab completion allows snippets like the following to work without
92 92 having to execute any code:
93 93
94 94 >>> myvar = ['hello', 42]
95 95 ... myvar[1].bi<tab>
96 96
97 97 Tab completion will be able to infer that ``myvar[1]`` is a real number without
98 98 executing any code, unlike the previously available ``IPCompleter.greedy``
99 99 option.
100 100
101 101 Be sure to update :any:`jedi` to the latest stable version or to try the
102 102 current development version to get better completions.
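A minimal sketch of driving the provisional API directly, assuming ``ip`` is the
running :any:`InteractiveShell` instance (its completer is ``ip.Completer``):

>>> with provisionalcompleter():
...     completions = list(ip.Completer.completions('myvar[1].bi', 11))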
103 103 """
104 104
105 105
106 106 # Copyright (c) IPython Development Team.
107 107 # Distributed under the terms of the Modified BSD License.
108 108 #
109 109 # Some of this code originated from rlcompleter in the Python standard library
110 110 # Copyright (C) 2001 Python Software Foundation, www.python.org
111 111
112 112
113 113 import builtins as builtin_mod
114 114 import glob
115 115 import inspect
116 116 import itertools
117 117 import keyword
118 118 import os
119 119 import re
120 120 import string
121 121 import sys
122 122 import time
123 123 import unicodedata
124 124 import uuid
125 125 import warnings
126 126 from contextlib import contextmanager
127 127 from importlib import import_module
128 128 from types import SimpleNamespace
129 129 from typing import Iterable, Iterator, List, Tuple, Union, Any, Sequence, Dict, NamedTuple, Pattern, Optional
130 130
131 131 from IPython.core.error import TryNext
132 132 from IPython.core.inputtransformer2 import ESC_MAGIC
133 133 from IPython.core.latex_symbols import latex_symbols, reverse_latex_symbol
134 134 from IPython.core.oinspect import InspectColors
135 135 from IPython.utils import generics
136 136 from IPython.utils.dir2 import dir2, get_real_method
137 137 from IPython.utils.path import ensure_dir_exists
138 138 from IPython.utils.process import arg_split
139 139 from traitlets import Bool, Enum, Int, List as ListTrait, Unicode, default, observe
140 140 from traitlets.config.configurable import Configurable
141 141
142 142 import __main__
143 143
144 144 # skip module doctests
145 145 skip_doctest = True
146 146
147 147 try:
148 148 import jedi
149 149 jedi.settings.case_insensitive_completion = False
150 150 import jedi.api.helpers
151 151 import jedi.api.classes
152 152 JEDI_INSTALLED = True
153 153 except ImportError:
154 154 JEDI_INSTALLED = False
155 155 #-----------------------------------------------------------------------------
156 156 # Globals
157 157 #-----------------------------------------------------------------------------
158 158
159 159 # ranges where we have most of the valid unicode names. We could be more finer
160 # grained but is it worth it for performace While unicode have character in the
161 # rage 0, 0x110000, we seem to have name for about 10% of those. (131808 as I
160 # grained but is it worth it for performance While unicode have character in the
161 # range 0, 0x110000, we seem to have name for about 10% of those. (131808 as I
162 162 # write this). With below range we cover them all, with a density of ~67%
163 163 # biggest next gap we consider only adds up about 1% density and there are 600
164 164 # gaps that would need hard coding.
165 165 _UNICODE_RANGES = [(32, 0x3134b), (0xe0001, 0xe01f0)]
166 166
167 167 # Public API
168 168 __all__ = ['Completer','IPCompleter']
169 169
170 170 if sys.platform == 'win32':
171 171 PROTECTABLES = ' '
172 172 else:
173 173 PROTECTABLES = ' ()[]{}?=\\|;:\'#*"^&'
174 174
175 175 # Protect against returning an enormous number of completions which the frontend
176 176 # may have trouble processing.
177 177 MATCHES_LIMIT = 500
178 178
179 179 _deprecation_readline_sentinel = object()
180 180
181 181
182 182 class ProvisionalCompleterWarning(FutureWarning):
183 183 """
184 184 Exception raised by an experimental feature in this module.
185 185
186 186 Wrap code in :any:`provisionalcompleter` context manager if you
187 187 are certain you want to use an unstable feature.
188 188 """
189 189 pass
190 190
191 191 warnings.filterwarnings('error', category=ProvisionalCompleterWarning)
192 192
193 193 @contextmanager
194 194 def provisionalcompleter(action='ignore'):
195 195 """
196 196 This context manager has to be used in any place where unstable completer
197 197 behavior and API may be called.
198 198
199 199 >>> with provisionalcompleter():
200 200 ... completer.do_experimental_things() # works
201 201
202 202 >>> completer.do_experimental_things() # raises.
203 203
204 204 .. note::
205 205
206 206 Unstable
207 207
208 208 By using this context manager you agree that the API in use may change
209 209 without warning, and that you won't complain if it does so.
210 210
211 211 You also understand that, if the API is not to your liking, you should report
212 212 a bug to explain your use case upstream.
213 213
214 214 We'll be happy to get your feedback, feature requests, and improvements on
215 215 any of the unstable APIs!
216 216 """
217 217 with warnings.catch_warnings():
218 218 warnings.filterwarnings(action, category=ProvisionalCompleterWarning)
219 219 yield
220 220
221 221
222 222 def has_open_quotes(s):
223 223 """Return whether a string has open quotes.
224 224
225 225 This simply counts whether the number of quote characters of either type in
226 226 the string is odd.
227 227
228 228 Returns
229 229 -------
230 230 If there is an open quote, the quote character is returned. Else, return
231 231 False.
232 232 """
233 233 # We check " first, then ', so complex cases with nested quotes will get
234 234 # the " to take precedence.
235 235 if s.count('"') % 2:
236 236 return '"'
237 237 elif s.count("'") % 2:
238 238 return "'"
239 239 else:
240 240 return False
241 241
242 242
243 243 def protect_filename(s, protectables=PROTECTABLES):
244 244 """Escape a string to protect certain characters."""
245 245 if set(s) & set(protectables):
246 246 if sys.platform == "win32":
247 247 return '"' + s + '"'
248 248 else:
249 249 return "".join(("\\" + c if c in protectables else c) for c in s)
250 250 else:
251 251 return s
252 252
253 253
254 254 def expand_user(path:str) -> Tuple[str, bool, str]:
255 255 """Expand ``~``-style usernames in strings.
256 256
257 257 This is similar to :func:`os.path.expanduser`, but it computes and returns
258 258 extra information that will be useful if the input was being used in
259 259 computing completions, and you wish to return the completions with the
260 260 original '~' instead of its expanded value.
261 261
262 262 Parameters
263 263 ----------
264 264 path : str
265 265 String to be expanded. If no ~ is present, the output is the same as the
266 266 input.
267 267
268 268 Returns
269 269 -------
270 270 newpath : str
271 271 Result of ~ expansion in the input path.
272 272 tilde_expand : bool
273 273 Whether any expansion was performed or not.
274 274 tilde_val : str
275 275 The value that ~ was replaced with.
276 276 """
277 277 # Default values
278 278 tilde_expand = False
279 279 tilde_val = ''
280 280 newpath = path
281 281
282 282 if path.startswith('~'):
283 283 tilde_expand = True
284 284 rest = len(path)-1
285 285 newpath = os.path.expanduser(path)
286 286 if rest:
287 287 tilde_val = newpath[:-rest]
288 288 else:
289 289 tilde_val = newpath
290 290
291 291 return newpath, tilde_expand, tilde_val
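# Behaviour sketch (the expanded value depends on the local home directory):
#   expand_user('~/data')  might return ('/home/me/data', True, '/home/me')
#   expand_user('data')    returns ('data', False, '')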
292 292
293 293
294 294 def compress_user(path:str, tilde_expand:bool, tilde_val:str) -> str:
295 295 """Does the opposite of expand_user, with its outputs.
296 296 """
297 297 if tilde_expand:
298 298 return path.replace(tilde_val, '~')
299 299 else:
300 300 return path
301 301
302 302
303 303 def completions_sorting_key(word):
304 304 """key for sorting completions
305 305
306 306 This does several things:
307 307
308 308 - Demote any completions starting with underscores to the end
309 309 - Insert any %magic and %%cellmagic completions in the alphabetical order
310 310 by their name
311 311 """
312 312 prio1, prio2 = 0, 0
313 313
314 314 if word.startswith('__'):
315 315 prio1 = 2
316 316 elif word.startswith('_'):
317 317 prio1 = 1
318 318
319 319 if word.endswith('='):
320 320 prio1 = -1
321 321
322 322 if word.startswith('%%'):
323 323 # If there's another % in there, this is something else, so leave it alone
324 324 if not "%" in word[2:]:
325 325 word = word[2:]
326 326 prio2 = 2
327 327 elif word.startswith('%'):
328 328 if not "%" in word[1:]:
329 329 word = word[1:]
330 330 prio2 = 1
331 331
332 332 return prio1, word, prio2
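# Usage sketch: sorting with this key interleaves magics alphabetically with
# regular names and pushes private/dunder names to the end, e.g.
#   sorted(['_b', 'a', '%b', '__c'], key=completions_sorting_key)
#   -> ['a', '%b', '_b', '__c']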
333 333
334 334
335 335 class _FakeJediCompletion:
336 336 """
337 337 This is a workaround to communicate to the UI that Jedi has crashed and to
338 338 report a bug. Will be used only if :any:`IPCompleter.debug` is set to true.
339 339
340 340 Added in IPython 6.0 so should likely be removed for 7.0
341 341
342 342 """
343 343
344 344 def __init__(self, name):
345 345
346 346 self.name = name
347 347 self.complete = name
348 348 self.type = 'crashed'
349 349 self.name_with_symbols = name
350 350 self.signature = ''
351 351 self._origin = 'fake'
352 352
353 353 def __repr__(self):
354 354 return '<Fake completion object jedi has crashed>'
355 355
356 356
357 357 class Completion:
358 358 """
359 359 Completion object used and returned by IPython completers.
360 360
361 361 .. warning::
362 362
363 363 Unstable
364 364
365 365 This function is unstable, API may change without warning.
366 366 It will also raise unless used in a proper context manager.
367 367
368 368 This acts as a middle ground :any:`Completion` object between the
369 369 :any:`jedi.api.classes.Completion` object and the Prompt Toolkit completion
370 370 object. While Jedi needs a lot of information about the evaluator and how the
371 371 code should be run/inspected, Prompt Toolkit (and other frontends) mostly
372 372 need user-facing information.
373 373
374 374 - Which range should be replaced by what.
375 375 - Some metadata (like completion type), or meta information to be displayed to
376 376 the user.
377 377
378 378 For debugging purposes we can also store the origin of the completion (``jedi``,
379 379 ``IPython.python_matches``, ``IPython.magics_matches``...).
380 380 """
381 381
382 382 __slots__ = ['start', 'end', 'text', 'type', 'signature', '_origin']
383 383
384 384 def __init__(self, start: int, end: int, text: str, *, type: str=None, _origin='', signature='') -> None:
385 385 warnings.warn("``Completion`` is a provisional API (as of IPython 6.0). "
386 386 "It may change without warnings. "
387 387 "Use in corresponding context manager.",
388 388 category=ProvisionalCompleterWarning, stacklevel=2)
389 389
390 390 self.start = start
391 391 self.end = end
392 392 self.text = text
393 393 self.type = type
394 394 self.signature = signature
395 395 self._origin = _origin
396 396
397 397 def __repr__(self):
398 398 return '<Completion start=%s end=%s text=%r type=%r, signature=%r,>' % \
399 399 (self.start, self.end, self.text, self.type or '?', self.signature or '?')
400 400
401 401 def __eq__(self, other) -> bool:
402 402 """
403 403 Equality and hash do not hash the type (as some completers may not be
404 404 able to infer the type), but are used to (partially) de-duplicate
405 405 completions.
406 406
407 407 Completely de-duplicating completions is a bit trickier than just
408 408 comparing, as it depends on surrounding text, which Completions are not
409 409 aware of.
410 410 """
411 411 return self.start == other.start and \
412 412 self.end == other.end and \
413 413 self.text == other.text
414 414
415 415 def __hash__(self):
416 416 return hash((self.start, self.end, self.text))
417 417
418 418
419 419 _IC = Iterable[Completion]
420 420
421 421
422 422 def _deduplicate_completions(text: str, completions: _IC)-> _IC:
423 423 """
424 424 Deduplicate a set of completions.
425 425
426 426 .. warning::
427 427
428 428 Unstable
429 429
430 430 This function is unstable, API may change without warning.
431 431
432 432 Parameters
433 433 ----------
434 434 text : str
435 435 text that should be completed.
436 436 completions : Iterator[Completion]
437 437 iterator over the completions to deduplicate
438 438
439 439 Yields
440 440 ------
441 441 `Completions` objects
442 442 Completions coming from multiple sources may be different, but end up having
443 443 the same effect when applied to ``text``. If this is the case, this will
444 444 consider completions as equal and only emit the first encountered.
445 445 Not folded in `completions()` yet for debugging purposes, and to detect when
446 446 the IPython completer does return things that Jedi does not, but should be
447 447 at some point.
448 448 """
449 449 completions = list(completions)
450 450 if not completions:
451 451 return
452 452
453 453 new_start = min(c.start for c in completions)
454 454 new_end = max(c.end for c in completions)
455 455
456 456 seen = set()
457 457 for c in completions:
458 458 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
459 459 if new_text not in seen:
460 460 yield c
461 461 seen.add(new_text)
462 462
463 463
464 464 def rectify_completions(text: str, completions: _IC, *, _debug=False)->_IC:
465 465 """
466 466 Rectify a set of completions to all have the same ``start`` and ``end``
467 467
468 468 .. warning::
469 469
470 470 Unstable
471 471
472 472 This function is unstable, API may change without warning.
473 473 It will also raise unless used in a proper context manager.
474 474
475 475 Parameters
476 476 ----------
477 477 text : str
478 478 text that should be completed.
479 479 completions : Iterator[Completion]
480 480 iterator over the completions to rectify
481 481
482 482 Notes
483 483 -----
484 484 :any:`jedi.api.classes.Completion` s returned by Jedi may not have the same start and end, though
485 485 the Jupyter Protocol requires them to behave like so. This will readjust
486 486 the completion to have the same ``start`` and ``end`` by padding both
487 487 extremities with surrounding text.
488 488
489 489 During stabilisation this should support a ``_debug`` option to log which
490 490 completions are returned by the IPython completer and not found in Jedi, in
491 491 order to make upstream bug reports.
492 492 """
493 493 warnings.warn("`rectify_completions` is a provisional API (as of IPython 6.0). "
494 494 "It may change without warnings. "
495 495 "Use in corresponding context manager.",
496 496 category=ProvisionalCompleterWarning, stacklevel=2)
497 497
498 498 completions = list(completions)
499 499 if not completions:
500 500 return
501 501 starts = (c.start for c in completions)
502 502 ends = (c.end for c in completions)
503 503
504 504 new_start = min(starts)
505 505 new_end = max(ends)
506 506
507 507 seen_jedi = set()
508 508 seen_python_matches = set()
509 509 for c in completions:
510 510 new_text = text[new_start:c.start] + c.text + text[c.end:new_end]
511 511 if c._origin == 'jedi':
512 512 seen_jedi.add(new_text)
513 513 elif c._origin == 'IPCompleter.python_matches':
514 514 seen_python_matches.add(new_text)
515 515 yield Completion(new_start, new_end, new_text, type=c.type, _origin=c._origin, signature=c.signature)
516 516 diff = seen_python_matches.difference(seen_jedi)
517 517 if diff and _debug:
518 518 print('IPython.python matches have extras:', diff)
519 519
520 520
521 521 if sys.platform == 'win32':
522 522 DELIMS = ' \t\n`!@#$^&*()=+[{]}|;\'",<>?'
523 523 else:
524 524 DELIMS = ' \t\n`!@#$^&*()=+[{]}\\|;:\'",<>?'
525 525
526 526 GREEDY_DELIMS = ' =\r\n'
527 527
528 528
529 529 class CompletionSplitter(object):
530 530 """An object to split an input line in a manner similar to readline.
531 531
532 532 By having our own implementation, we can expose readline-like completion in
533 533 a uniform manner to all frontends. This object only needs to be given the
534 534 line of text to be split and the cursor position on said line, and it
535 535 returns the 'word' to be completed on at the cursor after splitting the
536 536 entire line.
537 537
538 538 What characters are used as splitting delimiters can be controlled by
539 539 setting the ``delims`` attribute (this is a property that internally
540 540 automatically builds the necessary regular expression)"""
541 541
542 542 # Private interface
543 543
544 544 # A string of delimiter characters. The default value makes sense for
545 545 # IPython's most typical usage patterns.
546 546 _delims = DELIMS
547 547
548 548 # The expression (a normal string) to be compiled into a regular expression
549 549 # for actual splitting. We store it as an attribute mostly for ease of
550 550 # debugging, since this type of code can be so tricky to debug.
551 551 _delim_expr = None
552 552
553 553 # The regular expression that does the actual splitting
554 554 _delim_re = None
555 555
556 556 def __init__(self, delims=None):
557 557 delims = CompletionSplitter._delims if delims is None else delims
558 558 self.delims = delims
559 559
560 560 @property
561 561 def delims(self):
562 562 """Return the string of delimiter characters."""
563 563 return self._delims
564 564
565 565 @delims.setter
566 566 def delims(self, delims):
567 567 """Set the delimiters for line splitting."""
568 568 expr = '[' + ''.join('\\'+ c for c in delims) + ']'
569 569 self._delim_re = re.compile(expr)
570 570 self._delims = delims
571 571 self._delim_expr = expr
572 572
573 573 def split_line(self, line, cursor_pos=None):
574 574 """Split a line of text with a cursor at the given position.
575 575 """
576 576 l = line if cursor_pos is None else line[:cursor_pos]
577 577 return self._delim_re.split(l)[-1]
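# Behaviour sketch with the default delimiters: only the word under the cursor
# is returned, not the whole line, e.g.
#   CompletionSplitter().split_line('run foo.py bar', 10) -> 'foo.py'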
578 578
579 579
580 580
581 581 class Completer(Configurable):
582 582
583 583 greedy = Bool(False,
584 584 help="""Activate greedy completion
585 585 PENDING DEPRECATION. This is now mostly taken care of with Jedi.
586 586
587 587 This will enable completion on elements of lists, results of function calls, etc.,
588 588 but can be unsafe because the code is actually evaluated on TAB.
589 589 """
590 590 ).tag(config=True)
591 591
592 592 use_jedi = Bool(default_value=JEDI_INSTALLED,
593 593 help="Experimental: Use Jedi to generate autocompletions. "
594 594 "Default to True if jedi is installed.").tag(config=True)
595 595
596 596 jedi_compute_type_timeout = Int(default_value=400,
597 597 help="""Experimental: restrict time (in milliseconds) during which Jedi can compute types.
598 598 Set to 0 to stop computing types. A non-zero value lower than 100ms may hurt
599 599 performance by preventing jedi from building its cache.
600 600 """).tag(config=True)
601 601
602 602 debug = Bool(default_value=False,
603 603 help='Enable debug for the Completer. Mostly print extra '
604 604 'information for experimental jedi integration.')\
605 605 .tag(config=True)
606 606
607 607 backslash_combining_completions = Bool(True,
608 608 help="Enable unicode completions, e.g. \\alpha<tab> . "
609 609 "Includes completion of latex commands, unicode names, and expanding "
610 610 "unicode characters back to latex commands.").tag(config=True)
611 611
612 612
613 613
614 614 def __init__(self, namespace=None, global_namespace=None, **kwargs):
615 615 """Create a new completer for the command line.
616 616
617 617 Completer(namespace=ns, global_namespace=ns2) -> completer instance.
618 618
619 619 If unspecified, the default namespace where completions are performed
620 620 is __main__ (technically, __main__.__dict__). Namespaces should be
621 621 given as dictionaries.
622 622
623 623 An optional second namespace can be given. This allows the completer
624 624 to handle cases where both the local and global scopes need to be
625 625 distinguished.
626 626 """
627 627
628 628 # Don't bind to namespace quite yet, but flag whether the user wants a
629 629 # specific namespace or to use __main__.__dict__. This will allow us
630 630 # to bind to __main__.__dict__ at completion time, not now.
631 631 if namespace is None:
632 632 self.use_main_ns = True
633 633 else:
634 634 self.use_main_ns = False
635 635 self.namespace = namespace
636 636
637 637 # The global namespace, if given, can be bound directly
638 638 if global_namespace is None:
639 639 self.global_namespace = {}
640 640 else:
641 641 self.global_namespace = global_namespace
642 642
643 643 self.custom_matchers = []
644 644
645 645 super(Completer, self).__init__(**kwargs)
646 646
647 647 def complete(self, text, state):
648 648 """Return the next possible completion for 'text'.
649 649
650 650 This is called successively with state == 0, 1, 2, ... until it
651 651 returns None. The completion should begin with 'text'.
652 652
653 653 """
654 654 if self.use_main_ns:
655 655 self.namespace = __main__.__dict__
656 656
657 657 if state == 0:
658 658 if "." in text:
659 659 self.matches = self.attr_matches(text)
660 660 else:
661 661 self.matches = self.global_matches(text)
662 662 try:
663 663 return self.matches[state]
664 664 except IndexError:
665 665 return None
666 666
667 667 def global_matches(self, text):
668 668 """Compute matches when text is a simple name.
669 669
670 670 Return a list of all keywords, built-in functions and names currently
671 671 defined in self.namespace or self.global_namespace that match.
672 672
673 673 """
674 674 matches = []
675 675 match_append = matches.append
676 676 n = len(text)
677 677 for lst in [keyword.kwlist,
678 678 builtin_mod.__dict__.keys(),
679 679 self.namespace.keys(),
680 680 self.global_namespace.keys()]:
681 681 for word in lst:
682 682 if word[:n] == text and word != "__builtins__":
683 683 match_append(word)
684 684
685 685 snake_case_re = re.compile(r"[^_]+(_[^_]+)+?\Z")
686 686 for lst in [self.namespace.keys(),
687 687 self.global_namespace.keys()]:
688 688 shortened = {"_".join([sub[0] for sub in word.split('_')]) : word
689 689 for word in lst if snake_case_re.match(word)}
690 690 for word in shortened.keys():
691 691 if word[:n] == text and word != "__builtins__":
692 692 match_append(shortened[word])
693 693 return matches
694 694
695 695 def attr_matches(self, text):
696 696 """Compute matches when text contains a dot.
697 697
698 698 Assuming the text is of the form NAME.NAME....[NAME], and is
699 699 evaluatable in self.namespace or self.global_namespace, it will be
700 700 evaluated and its attributes (as revealed by dir()) are used as
701 701 possible completions. (For class instances, class members are
702 702 also considered.)
703 703
704 704 WARNING: this can still invoke arbitrary C code, if an object
705 705 with a __getattr__ hook is evaluated.
706 706
707 707 """
708 708
709 709 # Another option, seems to work great. Catches things like ''.<tab>
710 710 m = re.match(r"(\S+(\.\w+)*)\.(\w*)$", text)
711 711
712 712 if m:
713 713 expr, attr = m.group(1, 3)
714 714 elif self.greedy:
715 715 m2 = re.match(r"(.+)\.(\w*)$", self.line_buffer)
716 716 if not m2:
717 717 return []
718 718 expr, attr = m2.group(1,2)
719 719 else:
720 720 return []
721 721
722 722 try:
723 723 obj = eval(expr, self.namespace)
724 724 except:
725 725 try:
726 726 obj = eval(expr, self.global_namespace)
727 727 except:
728 728 return []
729 729
730 730 if self.limit_to__all__ and hasattr(obj, '__all__'):
731 731 words = get__all__entries(obj)
732 732 else:
733 733 words = dir2(obj)
734 734
735 735 try:
736 736 words = generics.complete_object(obj, words)
737 737 except TryNext:
738 738 pass
739 739 except AssertionError:
740 740 raise
741 741 except Exception:
742 742 # Silence errors from completion function
743 743 #raise # dbg
744 744 pass
745 745 # Build match list to return
746 746 n = len(attr)
747 747 return [u"%s.%s" % (expr, w) for w in words if w[:n] == attr ]
748 748
749 749
750 750 def get__all__entries(obj):
751 751 """returns the strings in the __all__ attribute"""
752 752 try:
753 753 words = getattr(obj, '__all__')
754 754 except:
755 755 return []
756 756
757 757 return [w for w in words if isinstance(w, str)]
758 758
759 759
760 760 def match_dict_keys(keys: List[Union[str, bytes, Tuple[Union[str, bytes]]]], prefix: str, delims: str,
761 761 extra_prefix: Optional[Tuple[str, bytes]]=None) -> Tuple[str, int, List[str]]:
762 762 """Used by dict_key_matches, matching the prefix to a list of keys
763 763
764 764 Parameters
765 765 ----------
766 766 keys
767 767 list of keys in dictionary currently being completed.
768 768 prefix
769 769 Part of the text already typed by the user. E.g. `mydict[b'fo`
770 770 delims
771 771 String of delimiters to consider when finding the current key.
772 772 extra_prefix : optional
773 773 Part of the text already typed in multi-key index cases. E.g. for
774 774 `mydict['foo', "bar", 'b`, this would be `('foo', 'bar')`.
775 775
776 776 Returns
777 777 -------
778 778 A tuple of three elements: ``quote``, ``token_start``, ``matched``, with
779 779 ``quote`` being the quote that needs to be used to close the current string,
780 780 ``token_start`` the position where the replacement should start occurring, and
781 781 ``matched`` a list of replacements/completions.
782 782
783 783 """
784 784 prefix_tuple = extra_prefix if extra_prefix else ()
785 785 Nprefix = len(prefix_tuple)
786 786 def filter_prefix_tuple(key):
787 787 # Reject too short keys
788 788 if len(key) <= Nprefix:
789 789 return False
790 790 # Reject keys with non str/bytes in it
791 791 for k in key:
792 792 if not isinstance(k, (str, bytes)):
793 793 return False
794 794 # Reject keys that do not match the prefix
795 795 for k, pt in zip(key, prefix_tuple):
796 796 if k != pt:
797 797 return False
798 798 # All checks passed!
799 799 return True
800 800
801 801 filtered_keys:List[Union[str,bytes]] = []
802 802 def _add_to_filtered_keys(key):
803 803 if isinstance(key, (str, bytes)):
804 804 filtered_keys.append(key)
805 805
806 806 for k in keys:
807 807 if isinstance(k, tuple):
808 808 if filter_prefix_tuple(k):
809 809 _add_to_filtered_keys(k[Nprefix])
810 810 else:
811 811 _add_to_filtered_keys(k)
812 812
813 813 if not prefix:
814 814 return '', 0, [repr(k) for k in filtered_keys]
815 815 quote_match = re.search('["\']', prefix)
816 816 assert quote_match is not None # silence mypy
817 817 quote = quote_match.group()
818 818 try:
819 819 prefix_str = eval(prefix + quote, {})
820 820 except Exception:
821 821 return '', 0, []
822 822
823 823 pattern = '[^' + ''.join('\\' + c for c in delims) + ']*$'
824 824 token_match = re.search(pattern, prefix, re.UNICODE)
825 825 assert token_match is not None # silence mypy
826 826 token_start = token_match.start()
827 827 token_prefix = token_match.group()
828 828
829 829 matched:List[str] = []
830 830 for key in filtered_keys:
831 831 try:
832 832 if not key.startswith(prefix_str):
833 833 continue
834 834 except (AttributeError, TypeError, UnicodeError):
835 835 # Python 3+ TypeError on b'a'.startswith('a') or vice-versa
836 836 continue
837 837
838 838 # reformat remainder of key to begin with prefix
839 839 rem = key[len(prefix_str):]
840 840 # force repr wrapped in '
841 841 rem_repr = repr(rem + '"') if isinstance(rem, str) else repr(rem + b'"')
842 842 rem_repr = rem_repr[1 + rem_repr.index("'"):-2]
843 843 if quote == '"':
844 844 # The entered prefix is quoted with ",
845 845 # but the match is quoted with '.
846 846 # A contained " hence needs escaping for comparison:
847 847 rem_repr = rem_repr.replace('"', '\\"')
848 848
849 849 # then reinsert prefix from start of token
850 850 matched.append('%s%s' % (token_prefix, rem_repr))
851 851 return quote, token_start, matched
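# Behaviour sketch, using the module-level DELIMS as delimiters:
#   match_dict_keys(['foo', 'bar'], "'f", DELIMS) -> ("'", 1, ['foo'])
# i.e. the quote to close the current string, the offset where the replacement
# starts, and the keys still matching the typed prefix.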
852 852
853 853
854 854 def cursor_to_position(text:str, line:int, column:int)->int:
855 855 """
856 856 Convert the (line,column) position of the cursor in text to an offset in a
857 857 string.
858 858
859 859 Parameters
860 860 ----------
861 861 text : str
862 862 The text in which to calculate the cursor offset
863 863 line : int
864 864 Line of the cursor; 0-indexed
865 865 column : int
866 866 Column of the cursor 0-indexed
867 867
868 868 Returns
869 869 -------
870 870 Position of the cursor in ``text``, 0-indexed.
871 871
872 872 See Also
873 873 --------
874 874 position_to_cursor : reciprocal of this function
875 875
876 876 """
877 877 lines = text.split('\n')
878 878 assert line <= len(lines), '{} <= {}'.format(str(line), str(len(lines)))
879 879
880 880 return sum(len(l) + 1 for l in lines[:line]) + column
881 881
882 882 def position_to_cursor(text:str, offset:int)->Tuple[int, int]:
883 883 """
884 884 Convert the position of the cursor in text (0-indexed) to a line
885 885 number (0-indexed) and a column number (0-indexed) pair.
886 886
887 887 Position should be a valid position in ``text``.
888 888
889 889 Parameters
890 890 ----------
891 891 text : str
892 892 The text in which to calculate the cursor offset
893 893 offset : int
894 894 Position of the cursor in ``text``, 0-indexed.
895 895
896 896 Returns
897 897 -------
898 898 (line, column) : (int, int)
899 899 Line of the cursor; 0-indexed, column of the cursor 0-indexed
900 900
901 901 See Also
902 902 --------
903 903 cursor_to_position : reciprocal of this function
904 904
905 905 """
906 906
907 907 assert 0 <= offset <= len(text) , "0 <= %s <= %s" % (offset , len(text))
908 908
909 909 before = text[:offset]
910 910 blines = before.split('\n')  # ! splitlines trims the trailing \n
911 911 line = before.count('\n')
912 912 col = len(blines[-1])
913 913 return line, col
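# Sketch of the reciprocal relationship between the two functions
# (everything 0-indexed):
#   text = 'ab\ncd'
#   cursor_to_position(text, 1, 1) == 4
#   position_to_cursor(text, 4) == (1, 1)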
914 914
915 915
916 916 def _safe_isinstance(obj, module, class_name):
917 917 """Checks if obj is an instance of module.class_name if loaded
918 918 """
919 919 return (module in sys.modules and
920 920 isinstance(obj, getattr(import_module(module), class_name)))
921 921
922 922 def back_unicode_name_matches(text:str) -> Tuple[str, Sequence[str]]:
923 923 """Match Unicode characters back to Unicode name
924 924
925 925 This does ``☃`` -> ``\\snowman``
926 926
927 927 Note that snowman is not a valid python3 combining character but will be expanded.
928 928 Though it will not recombine back to the snowman character by the completion machinery.
929 929
930 930 Neither will this back-complete standard sequences like \\n, \\b ...
931 931
932 932 Returns
933 933 =======
934 934
935 935 Return a tuple with two elements:
936 936
937 937 - The Unicode character that was matched (preceded with a backslash), or
938 938 empty string,
939 939 - a sequence (of 1), the name for the matched Unicode character, preceded by
940 940 backslash, or empty if no match.
941 941
942 942 """
943 943 if len(text)<2:
944 944 return '', ()
945 945 maybe_slash = text[-2]
946 946 if maybe_slash != '\\':
947 947 return '', ()
948 948
949 949 char = text[-1]
950 950 # no expand on quote for completion in strings.
951 951 # nor backcomplete standard ascii keys
952 952 if char in string.ascii_letters or char in ('"',"'"):
953 953 return '', ()
954 954 try :
955 955 unic = unicodedata.name(char)
956 956 return '\\'+char,('\\'+unic,)
957 957 except KeyError:
958 958 pass
959 959 return '', ()
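# Behaviour sketch:
#   back_unicode_name_matches('\☃') -> ('\☃', ('\SNOWMAN',))
# i.e. the matched '\<char>' and the '\NAME' candidate it can be replaced with.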
960 960
961 961 def back_latex_name_matches(text:str) -> Tuple[str, Sequence[str]] :
962 962 """Match latex characters back to unicode name
963 963
964 964 This does ``\\ℵ`` -> ``\\aleph``
965 965
966 966 """
967 967 if len(text)<2:
968 968 return '', ()
969 969 maybe_slash = text[-2]
970 970 if maybe_slash != '\\':
971 971 return '', ()
972 972
973 973
974 974 char = text[-1]
975 975 # no expand on quote for completion in strings.
976 976 # nor backcomplete standard ascii keys
977 977 if char in string.ascii_letters or char in ('"',"'"):
978 978 return '', ()
979 979 try :
980 980 latex = reverse_latex_symbol[char]
981 981 # '\\' replace the \ as well
982 982 return '\\'+char,[latex]
983 983 except KeyError:
984 984 pass
985 985 return '', ()
986 986
987 987
988 988 def _formatparamchildren(parameter) -> str:
989 989 """
990 990 Get parameter name and value from Jedi Private API
991 991
992 992 Jedi does not expose a simple way to get `param=value` from its API.
993 993
994 994 Parameters
995 995 ----------
996 996 parameter
997 997 Jedi's function `Param`
998 998
999 999 Returns
1000 1000 -------
1001 1001 A string like 'a', 'b=1', '*args', '**kwargs'
1002 1002
1003 1003 """
1004 1004 description = parameter.description
1005 1005 if not description.startswith('param '):
1006 1006 raise ValueError('Jedi function parameter description has changed format. '
1007 1007 'Expected "param ...", found %r".' % description)
1008 1008 return description[6:]
1009 1009
1010 1010 def _make_signature(completion)-> str:
1011 1011 """
1012 1012 Make the signature from a jedi completion
1013 1013
1014 1014 Parameters
1015 1015 ----------
1016 1016 completion : jedi.Completion
1017 1017 object does not complete a function type
1018 1018
1019 1019 Returns
1020 1020 -------
1021 1021 a string consisting of the function signature, with the parentheses but
1022 1022 without the function name. Example:
1023 1023 `(a, *args, b=1, **kwargs)`
1024 1024
1025 1025 """
1026 1026
1027 1027 # it looks like this might work on jedi 0.17
1028 1028 if hasattr(completion, 'get_signatures'):
1029 1029 signatures = completion.get_signatures()
1030 1030 if not signatures:
1031 1031 return '(?)'
1032 1032
1033 1033 c0 = completion.get_signatures()[0]
1034 1034 return '('+c0.to_string().split('(', maxsplit=1)[1]
1035 1035
1036 1036 return '(%s)'% ', '.join([f for f in (_formatparamchildren(p) for signature in completion.get_signatures()
1037 1037 for p in signature.defined_names()) if f])
1038 1038
1039 1039
1040 1040 class _CompleteResult(NamedTuple):
1041 1041 matched_text : str
1042 1042 matches: Sequence[str]
1043 1043 matches_origin: Sequence[str]
1044 1044 jedi_matches: Any
1045 1045
1046 1046
1047 1047 class IPCompleter(Completer):
1048 1048 """Extension of the completer class with IPython-specific features"""
1049 1049
1050 1050 __dict_key_regexps: Optional[Dict[bool,Pattern]] = None
1051 1051
1052 1052 @observe('greedy')
1053 1053 def _greedy_changed(self, change):
1054 1054 """update the splitter and readline delims when greedy is changed"""
1055 1055 if change['new']:
1056 1056 self.splitter.delims = GREEDY_DELIMS
1057 1057 else:
1058 1058 self.splitter.delims = DELIMS
1059 1059
1060 1060 dict_keys_only = Bool(False,
1061 1061 help="""Whether to show dict key matches only""")
1062 1062
1063 1063 merge_completions = Bool(True,
1064 1064 help="""Whether to merge completion results into a single list
1065 1065
1066 1066 If False, only the completion results from the first non-empty
1067 1067 completer will be returned.
1068 1068 """
1069 1069 ).tag(config=True)
1070 1070 omit__names = Enum((0,1,2), default_value=2,
1071 1071 help="""Instruct the completer to omit private method names
1072 1072
1073 1073 Specifically, when completing on ``object.<tab>``.
1074 1074
1075 1075 When 2 [default]: all names that start with '_' will be excluded.
1076 1076
1077 1077 When 1: all 'magic' names (``__foo__``) will be excluded.
1078 1078
1079 1079 When 0: nothing will be excluded.
1080 1080 """
1081 1081 ).tag(config=True)
1082 1082 limit_to__all__ = Bool(False,
1083 1083 help="""
1084 1084 DEPRECATED as of version 5.0.
1085 1085
1086 1086 Instruct the completer to use __all__ for the completion
1087 1087
1088 1088 Specifically, when completing on ``object.<tab>``.
1089 1089
1090 1090 When True: only those names in obj.__all__ will be included.
1091 1091
1092 1092 When False [default]: the __all__ attribute is ignored
1093 1093 """,
1094 1094 ).tag(config=True)
1095 1095
1096 1096 profile_completions = Bool(
1097 1097 default_value=False,
1098 1098 help="If True, emit profiling data for completion subsystem using cProfile."
1099 1099 ).tag(config=True)
1100 1100
1101 1101 profiler_output_dir = Unicode(
1102 1102 default_value=".completion_profiles",
1103 1103 help="Template for path at which to output profile data for completions."
1104 1104 ).tag(config=True)
1105 1105
1106 1106 @observe('limit_to__all__')
1107 1107 def _limit_to_all_changed(self, change):
1108 1108 warnings.warn('`IPython.core.IPCompleter.limit_to__all__` configuration '
1109 1109 'value has been deprecated since IPython 5.0, will be made to have '
1110 1110 'no effect and then removed in a future version of IPython.',
1111 1111 UserWarning)
1112 1112
1113 1113 def __init__(self, shell=None, namespace=None, global_namespace=None,
1114 1114 use_readline=_deprecation_readline_sentinel, config=None, **kwargs):
1115 1115 """IPCompleter() -> completer
1116 1116
1117 1117 Return a completer object.
1118 1118
1119 1119 Parameters
1120 1120 ----------
1121 1121 shell
1122 1122 a pointer to the ipython shell itself. This is needed
1123 1123 because this completer knows about magic functions, and those can
1124 1124 only be accessed via the ipython instance.
1125 1125 namespace : dict, optional
1126 1126 an optional dict where completions are performed.
1127 1127 global_namespace : dict, optional
1128 1128 secondary optional dict for completions, to
1129 1129 handle cases (such as IPython embedded inside functions) where
1130 1130 both Python scopes are visible.
1131 1131 use_readline : bool, optional
1132 1132 DEPRECATED, ignored since IPython 6.0, will have no effect
1133 1133 """
1134 1134
1135 1135 self.magic_escape = ESC_MAGIC
1136 1136 self.splitter = CompletionSplitter()
1137 1137
1138 1138 if use_readline is not _deprecation_readline_sentinel:
1139 1139 warnings.warn('The `use_readline` parameter is deprecated and ignored since IPython 6.0.',
1140 1140 DeprecationWarning, stacklevel=2)
1141 1141
1142 1142 # _greedy_changed() depends on splitter and readline being defined:
1143 1143 Completer.__init__(self, namespace=namespace, global_namespace=global_namespace,
1144 1144 config=config, **kwargs)
1145 1145
1146 1146 # List where completion matches will be stored
1147 1147 self.matches = []
1148 1148 self.shell = shell
1149 1149 # Regexp to split filenames with spaces in them
1150 1150 self.space_name_re = re.compile(r'([^\\] )')
1151 1151 # Hold a local ref. to glob.glob for speed
1152 1152 self.glob = glob.glob
1153 1153
1154 1154 # Determine if we are running on 'dumb' terminals, like (X)Emacs
1155 1155 # buffers, to avoid completion problems.
1156 1156 term = os.environ.get('TERM','xterm')
1157 1157 self.dumb_terminal = term in ['dumb','emacs']
1158 1158
1159 1159 # Special handling of backslashes needed in win32 platforms
1160 1160 if sys.platform == "win32":
1161 1161 self.clean_glob = self._clean_glob_win32
1162 1162 else:
1163 1163 self.clean_glob = self._clean_glob
1164 1164
1165 1165 #regexp to parse docstring for function signature
1166 1166 self.docstring_sig_re = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1167 1167 self.docstring_kwd_re = re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1168 1168 #use this if positional argument name is also needed
1169 1169 #= re.compile(r'[\s|\[]*(\w+)(?:\s*=?\s*.*)')
1170 1170
1171 1171 self.magic_arg_matchers = [
1172 1172 self.magic_config_matches,
1173 1173 self.magic_color_matches,
1174 1174 ]
1175 1175
1176 1176 # This is set externally by InteractiveShell
1177 1177 self.custom_completers = None
1178 1178
1179 1179 # This is a list of names of unicode characters that can be completed
1180 1180 # into their corresponding unicode value. The list is large, so we
1181 1181 # lazily initialize it on first use. Consuming code should access this
1182 1182 # attribute through the `@unicode_names` property.
1183 1183 self._unicode_names = None
1184 1184
1185 1185 @property
1186 1186 def matchers(self) -> List[Any]:
1187 1187 """All active matcher routines for completion"""
1188 1188 if self.dict_keys_only:
1189 1189 return [self.dict_key_matches]
1190 1190
1191 1191 if self.use_jedi:
1192 1192 return [
1193 1193 *self.custom_matchers,
1194 1194 self.file_matches,
1195 1195 self.magic_matches,
1196 1196 self.dict_key_matches,
1197 1197 ]
1198 1198 else:
1199 1199 return [
1200 1200 *self.custom_matchers,
1201 1201 self.python_matches,
1202 1202 self.file_matches,
1203 1203 self.magic_matches,
1204 1204 self.python_func_kw_matches,
1205 1205 self.dict_key_matches,
1206 1206 ]
1207 1207
1208 1208 def all_completions(self, text:str) -> List[str]:
1209 1209 """
1210 1210 Wrapper around the completion methods for the benefit of emacs.
1211 1211 """
1212 1212 prefix = text.rpartition('.')[0]
1213 1213 with provisionalcompleter():
1214 1214 return ['.'.join([prefix, c.text]) if prefix and self.use_jedi else c.text
1215 1215 for c in self.completions(text, len(text))]
1216 1216
1217 1217 return self.complete(text)[1]
1218 1218
1219 1219 def _clean_glob(self, text:str):
1220 1220 return self.glob("%s*" % text)
1221 1221
1222 1222 def _clean_glob_win32(self, text:str):
1223 1223 return [f.replace("\\","/")
1224 1224 for f in self.glob("%s*" % text)]
1225 1225
1226 1226 def file_matches(self, text:str)->List[str]:
1227 1227 """Match filenames, expanding ~USER type strings.
1228 1228
1229 1229 Most of the seemingly convoluted logic in this completer is an
1230 1230 attempt to handle filenames with spaces in them. And yet it's not
1231 1231 quite perfect, because Python's readline doesn't expose all of the
1232 1232 GNU readline details needed for this to be done correctly.
1233 1233
1234 1234 For a filename with a space in it, the printed completions will be
1235 1235 only the parts after what's already been typed (instead of the
1236 1236 full completions, as is normally done). I don't think with the
1237 1237 current (as of Python 2.3) Python readline it's possible to do
1238 1238 better."""
1239 1239
1240 1240 # chars that require escaping with backslash - i.e. chars
1241 1241 # that readline treats incorrectly as delimiters, but we
1242 1242 # don't want to treat as delimiters in filename matching
1243 1243 # when escaped with backslash
1244 1244 if text.startswith('!'):
1245 1245 text = text[1:]
1246 1246 text_prefix = u'!'
1247 1247 else:
1248 1248 text_prefix = u''
1249 1249
1250 1250 text_until_cursor = self.text_until_cursor
1251 1251 # track strings with open quotes
1252 1252 open_quotes = has_open_quotes(text_until_cursor)
1253 1253
1254 1254 if '(' in text_until_cursor or '[' in text_until_cursor:
1255 1255 lsplit = text
1256 1256 else:
1257 1257 try:
1258 1258 # arg_split ~ shlex.split, but with unicode bugs fixed by us
1259 1259 lsplit = arg_split(text_until_cursor)[-1]
1260 1260 except ValueError:
1261 1261 # typically an unmatched ", or backslash without escaped char.
1262 1262 if open_quotes:
1263 1263 lsplit = text_until_cursor.split(open_quotes)[-1]
1264 1264 else:
1265 1265 return []
1266 1266 except IndexError:
1267 1267 # tab pressed on empty line
1268 1268 lsplit = ""
1269 1269
1270 1270 if not open_quotes and lsplit != protect_filename(lsplit):
1271 1271 # if protectables are found, do matching on the whole escaped name
1272 1272 has_protectables = True
1273 1273 text0,text = text,lsplit
1274 1274 else:
1275 1275 has_protectables = False
1276 1276 text = os.path.expanduser(text)
1277 1277
1278 1278 if text == "":
1279 1279 return [text_prefix + protect_filename(f) for f in self.glob("*")]
1280 1280
1281 1281 # Compute the matches from the filesystem
1282 1282 if sys.platform == 'win32':
1283 1283 m0 = self.clean_glob(text)
1284 1284 else:
1285 1285 m0 = self.clean_glob(text.replace('\\', ''))
1286 1286
1287 1287 if has_protectables:
1288 1288 # If we had protectables, we need to revert our changes to the
1289 1289 # beginning of filename so that we don't double-write the part
1290 1290 # of the filename we have so far
1291 1291 len_lsplit = len(lsplit)
1292 1292 matches = [text_prefix + text0 +
1293 1293 protect_filename(f[len_lsplit:]) for f in m0]
1294 1294 else:
1295 1295 if open_quotes:
1296 1296 # if we have a string with an open quote, we don't need to
1297 1297 # protect the names beyond the quote (and we _shouldn't_, as
1298 1298 # it would cause bugs when the filesystem call is made).
1299 1299 matches = m0 if sys.platform == "win32" else\
1300 1300 [protect_filename(f, open_quotes) for f in m0]
1301 1301 else:
1302 1302 matches = [text_prefix +
1303 1303 protect_filename(f) for f in m0]
1304 1304
1305 1305 # Mark directories in input list by appending '/' to their names.
1306 1306 return [x+'/' if os.path.isdir(x) else x for x in matches]
1307 1307
1308 1308 def magic_matches(self, text:str):
1309 1309 """Match magics"""
1310 1310 # Get all shell magics now rather than statically, so magics loaded at
1311 1311 # runtime show up too.
1312 1312 lsm = self.shell.magics_manager.lsmagic()
1313 1313 line_magics = lsm['line']
1314 1314 cell_magics = lsm['cell']
1315 1315 pre = self.magic_escape
1316 1316 pre2 = pre+pre
1317 1317
1318 1318 explicit_magic = text.startswith(pre)
1319 1319
1320 1320 # Completion logic:
1321 1321 # - user gives %%: only do cell magics
1322 1322 # - user gives %: do both line and cell magics
1323 1323 # - no prefix: do both
1324 1324 # In other words, line magics are skipped if the user gives %% explicitly
1325 1325 #
1326 1326 # We also exclude magics that match any currently visible names:
1327 1327 # https://github.com/ipython/ipython/issues/4877, unless the user has
1328 1328 # typed a %:
1329 1329 # https://github.com/ipython/ipython/issues/10754
1330 1330 bare_text = text.lstrip(pre)
1331 1331 global_matches = self.global_matches(bare_text)
1332 1332 if not explicit_magic:
1333 1333 def matches(magic):
1334 1334 """
1335 1335 Filter magics, in particular remove magics that match
1336 1336 a name present in global namespace.
1337 1337 """
1338 1338 return ( magic.startswith(bare_text) and
1339 1339 magic not in global_matches )
1340 1340 else:
1341 1341 def matches(magic):
1342 1342 return magic.startswith(bare_text)
1343 1343
1344 1344 comp = [ pre2+m for m in cell_magics if matches(m)]
1345 1345 if not text.startswith(pre2):
1346 1346 comp += [ pre+m for m in line_magics if matches(m)]
1347 1347
1348 1348 return comp
1349 1349
1350 1350 def magic_config_matches(self, text:str) -> List[str]:
1351 1351 """ Match class names and attributes for %config magic """
1352 1352 texts = text.strip().split()
1353 1353
1354 1354 if len(texts) > 0 and (texts[0] == 'config' or texts[0] == '%config'):
1355 1355 # get all configuration classes
1356 1356 classes = sorted(set([ c for c in self.shell.configurables
1357 1357 if c.__class__.class_traits(config=True)
1358 1358 ]), key=lambda x: x.__class__.__name__)
1359 1359 classnames = [ c.__class__.__name__ for c in classes ]
1360 1360
1361 1361 # return all classnames if config or %config is given
1362 1362 if len(texts) == 1:
1363 1363 return classnames
1364 1364
1365 1365 # match classname
1366 1366 classname_texts = texts[1].split('.')
1367 1367 classname = classname_texts[0]
1368 1368 classname_matches = [ c for c in classnames
1369 1369 if c.startswith(classname) ]
1370 1370
1371 1371 # return matched classes or the matched class with attributes
1372 1372 if texts[1].find('.') < 0:
1373 1373 return classname_matches
1374 1374 elif len(classname_matches) == 1 and \
1375 1375 classname_matches[0] == classname:
1376 1376 cls = classes[classnames.index(classname)].__class__
1377 1377 help = cls.class_get_help()
1378 1378 # strip leading '--' from cl-args:
1379 1379 help = re.sub(re.compile(r'^--', re.MULTILINE), '', help)
1380 1380 return [ attr.split('=')[0]
1381 1381 for attr in help.strip().splitlines()
1382 1382 if attr.startswith(texts[1]) ]
1383 1383 return []
1384 1384
1385 1385 def magic_color_matches(self, text:str) -> List[str] :
1386 1386 """ Match color schemes for %colors magic"""
1387 1387 texts = text.split()
1388 1388 if text.endswith(' '):
1389 1389 # .split() strips off the trailing whitespace. Add '' back
1390 1390 # so that: '%colors ' -> ['%colors', '']
1391 1391 texts.append('')
1392 1392
1393 1393 if len(texts) == 2 and (texts[0] == 'colors' or texts[0] == '%colors'):
1394 1394 prefix = texts[1]
1395 1395 return [ color for color in InspectColors.keys()
1396 1396 if color.startswith(prefix) ]
1397 1397 return []
1398 1398
1399 1399 def _jedi_matches(self, cursor_column:int, cursor_line:int, text:str) -> Iterable[Any]:
1400 1400 """
1401 1401 Return a list of :any:`jedi.api.Completions` objects from a ``text`` and
1402 1402 cursor position.
1403 1403
1404 1404 Parameters
1405 1405 ----------
1406 1406 cursor_column : int
1407 1407 column position of the cursor in ``text``, 0-indexed.
1408 1408 cursor_line : int
1409 1409 line position of the cursor in ``text``, 0-indexed
1410 1410 text : str
1411 1411 text to complete
1412 1412
1413 1413 Notes
1414 1414 -----
1415 1415 If ``IPCompleter.debug`` is ``True``, this may return a :any:`_FakeJediCompletion`
1416 1416 object containing a string with the Jedi debug information attached.
1417 1417 """
1418 1418 namespaces = [self.namespace]
1419 1419 if self.global_namespace is not None:
1420 1420 namespaces.append(self.global_namespace)
1421 1421
1422 1422 completion_filter = lambda x:x
1423 1423 offset = cursor_to_position(text, cursor_line, cursor_column)
1424 1424 # filter output if we are completing for object members
1425 1425 if offset:
1426 1426 pre = text[offset-1]
1427 1427 if pre == '.':
1428 1428 if self.omit__names == 2:
1429 1429 completion_filter = lambda c:not c.name.startswith('_')
1430 1430 elif self.omit__names == 1:
1431 1431 completion_filter = lambda c:not (c.name.startswith('__') and c.name.endswith('__'))
1432 1432 elif self.omit__names == 0:
1433 1433 completion_filter = lambda x:x
1434 1434 else:
1435 1435 raise ValueError("Don't understand self.omit__names == {}".format(self.omit__names))
1436 1436
1437 1437 interpreter = jedi.Interpreter(text[:offset], namespaces)
1438 1438 try_jedi = True
1439 1439
1440 1440 try:
1441 1441 # find the first token in the current tree -- if it is a ' or " then we are in a string
1442 1442 completing_string = False
1443 1443 try:
1444 1444 first_child = next(c for c in interpreter._get_module().tree_node.children if hasattr(c, 'value'))
1445 1445 except StopIteration:
1446 1446 pass
1447 1447 else:
1448 1448 # note the value may be ', ", or it may also be ''' or """, or
1449 1449 # in some cases, """what/you/typed..., but all of these are
1450 1450 # strings.
1451 1451 completing_string = len(first_child.value) > 0 and first_child.value[0] in {"'", '"'}
1452 1452
1453 1453 # if we are in a string jedi is likely not the right candidate for
1454 1454 # now. Skip it.
1455 1455 try_jedi = not completing_string
1456 1456 except Exception as e:
1457 1457 # many things can go wrong; we are using a private API, just don't crash.
1458 1458 if self.debug:
1459 1459 print("Error detecting if completing a non-finished string :", e, '|')
1460 1460
1461 1461 if not try_jedi:
1462 1462 return []
1463 1463 try:
1464 1464 return filter(completion_filter, interpreter.complete(column=cursor_column, line=cursor_line + 1))
1465 1465 except Exception as e:
1466 1466 if self.debug:
1467 1467 return [_FakeJediCompletion('Oops Jedi has crashed, please report a bug with the following:\n"""\n%s\ns"""' % (e))]
1468 1468 else:
1469 1469 return []
1470 1470
1471 1471 def python_matches(self, text:str)->List[str]:
1472 1472 """Match attributes or global python names"""
1473 1473 if "." in text:
1474 1474 try:
1475 1475 matches = self.attr_matches(text)
1476 1476 if text.endswith('.') and self.omit__names:
1477 1477 if self.omit__names == 1:
1478 1478 # true if txt is _not_ a __ name, false otherwise:
1479 1479 no__name = (lambda txt:
1480 1480 re.match(r'.*\.__.*?__',txt) is None)
1481 1481 else:
1482 1482 # true if txt is _not_ a _ name, false otherwise:
1483 1483 no__name = (lambda txt:
1484 1484 re.match(r'\._.*?',txt[txt.rindex('.'):]) is None)
1485 1485 matches = filter(no__name, matches)
1486 1486 except NameError:
1487 1487 # catches <undefined attributes>.<tab>
1488 1488 matches = []
1489 1489 else:
1490 1490 matches = self.global_matches(text)
1491 1491 return matches
1492 1492
1493 1493 def _default_arguments_from_docstring(self, doc):
1494 1494 """Parse the first line of docstring for call signature.
1495 1495
1496 1496 Docstring should be of the form 'min(iterable[, key=func])\n'.
1497 1497 It can also parse cython docstring of the form
1498 1498 'Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)'.
1499 1499 """
1500 1500 if doc is None:
1501 1501 return []
1502 1502
1503 1503 # care only about the first line
1504 1504 line = doc.lstrip().splitlines()[0]
1505 1505
1506 1506 #p = re.compile(r'^[\w|\s.]+\(([^)]*)\).*')
1507 1507 #'min(iterable[, key=func])\n' -> 'iterable[, key=func]'
1508 1508 sig = self.docstring_sig_re.search(line)
1509 1509 if sig is None:
1510 1510 return []
1511 1511 # iterable[, key=func]' -> ['iterable[' ,' key=func]']
1512 1512 sig = sig.groups()[0].split(',')
1513 1513 ret = []
1514 1514 for s in sig:
1515 1515 #re.compile(r'[\s|\[]*(\w+)(?:\s*=\s*.*)')
1516 1516 ret += self.docstring_kwd_re.findall(s)
1517 1517 return ret
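# Behaviour sketch: given the docstring line 'min(iterable[, key=func])', only
# the keyword-looking parameter is extracted:
#   self._default_arguments_from_docstring('min(iterable[, key=func])\n') -> ['key']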
1518 1518
1519 1519 def _default_arguments(self, obj):
1520 1520 """Return the list of default arguments of obj if it is callable,
1521 1521 or empty list otherwise."""
1522 1522 call_obj = obj
1523 1523 ret = []
1524 1524 if inspect.isbuiltin(obj):
1525 1525 pass
1526 1526 elif not (inspect.isfunction(obj) or inspect.ismethod(obj)):
1527 1527 if inspect.isclass(obj):
1528 1528 #for cython embedsignature=True the constructor docstring
1529 1529 #belongs to the object itself not __init__
1530 1530 ret += self._default_arguments_from_docstring(
1531 1531 getattr(obj, '__doc__', ''))
1532 1532 # for classes, check for __init__,__new__
1533 1533 call_obj = (getattr(obj, '__init__', None) or
1534 1534 getattr(obj, '__new__', None))
1535 1535 # for all others, check if they are __call__able
1536 1536 elif hasattr(obj, '__call__'):
1537 1537 call_obj = obj.__call__
1538 1538 ret += self._default_arguments_from_docstring(
1539 1539 getattr(call_obj, '__doc__', ''))
1540 1540
1541 1541 _keeps = (inspect.Parameter.KEYWORD_ONLY,
1542 1542 inspect.Parameter.POSITIONAL_OR_KEYWORD)
1543 1543
1544 1544 try:
1545 1545 sig = inspect.signature(obj)
1546 1546 ret.extend(k for k, v in sig.parameters.items() if
1547 1547 v.kind in _keeps)
1548 1548 except ValueError:
1549 1549 pass
1550 1550
1551 1551 return list(set(ret))
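The ``inspect.signature`` path above is easy to check in isolation; a minimal sketch using only the standard library::

    import inspect

    def f(a, b=1, *args, c=2, **kwargs):
        pass

    keeps = (inspect.Parameter.KEYWORD_ONLY,
             inspect.Parameter.POSITIONAL_OR_KEYWORD)
    names = [k for k, v in inspect.signature(f).parameters.items()
             if v.kind in keeps]
    print(names)   # ['a', 'b', 'c'] -- *args and **kwargs are filtered out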
1552 1552
1553 1553 def python_func_kw_matches(self, text):
1554 1554 """Match named parameters (kwargs) of the last open function"""
1555 1555
1556 1556 if "." in text: # a parameter cannot be dotted
1557 1557 return []
1558 1558 try: regexp = self.__funcParamsRegex
1559 1559 except AttributeError:
1560 1560 regexp = self.__funcParamsRegex = re.compile(r'''
1561 1561 '.*?(?<!\\)' | # single quoted strings or
1562 1562 ".*?(?<!\\)" | # double quoted strings or
1563 1563 \w+ | # identifier
1564 1564 \S # other characters
1565 1565 ''', re.VERBOSE | re.DOTALL)
1566 1566 # 1. find the nearest identifier that comes before an unclosed
1567 1567 # parenthesis before the cursor
1568 1568 # e.g. for "foo (1+bar(x), pa<cursor>,a=1)", the candidate is "foo"
1569 1569 tokens = regexp.findall(self.text_until_cursor)
1570 1570 iterTokens = reversed(tokens); openPar = 0
1571 1571
1572 1572 for token in iterTokens:
1573 1573 if token == ')':
1574 1574 openPar -= 1
1575 1575 elif token == '(':
1576 1576 openPar += 1
1577 1577 if openPar > 0:
1578 1578 # found the last unclosed parenthesis
1579 1579 break
1580 1580 else:
1581 1581 return []
1582 1582 # 2. Concatenate dotted names ("foo.bar" for "foo.bar(x, pa" )
1583 1583 ids = []
1584 1584 isId = re.compile(r'\w+$').match
1585 1585
1586 1586 while True:
1587 1587 try:
1588 1588 ids.append(next(iterTokens))
1589 1589 if not isId(ids[-1]):
1590 1590 ids.pop(); break
1591 1591 if not next(iterTokens) == '.':
1592 1592 break
1593 1593 except StopIteration:
1594 1594 break
1595 1595
1596 1596 # Find all named arguments already assigned to, so as to avoid
1597 1597 # suggesting them again
1598 1598 usedNamedArgs = set()
1599 1599 par_level = -1
1600 1600 for token, next_token in zip(tokens, tokens[1:]):
1601 1601 if token == '(':
1602 1602 par_level += 1
1603 1603 elif token == ')':
1604 1604 par_level -= 1
1605 1605
1606 1606 if par_level != 0:
1607 1607 continue
1608 1608
1609 1609 if next_token != '=':
1610 1610 continue
1611 1611
1612 1612 usedNamedArgs.add(token)
1613 1613
1614 1614 argMatches = []
1615 1615 try:
1616 1616 callableObj = '.'.join(ids[::-1])
1617 1617 namedArgs = self._default_arguments(eval(callableObj,
1618 1618 self.namespace))
1619 1619
1620 1620 # Remove used named arguments from the list, no need to show twice
1621 1621 for namedArg in set(namedArgs) - usedNamedArgs:
1622 1622 if namedArg.startswith(text):
1623 1623 argMatches.append("%s=" %namedArg)
1624 1624 except:
1625 1625 pass
1626 1626
1627 1627 return argMatches
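At the user level this is what turns ``sorted([2, 1], rev<tab>`` into ``reverse=``. A hedged illustration through the (pending-deprecation) ``complete`` API defined later in this module; it assumes a running IPython session and the exact match list may vary::

    ip = get_ipython()
    line = 'sorted([2, 1], rev'
    text, matches = ip.Completer.complete(line_buffer=line, cursor_pos=len(line))
    print('reverse=' in matches)   # expected True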
1628 1628
1629 1629 @staticmethod
1630 1630 def _get_keys(obj: Any) -> List[Any]:
1631 1631 # Objects can define their own completions by defining an
1632 1632 # _ipython_key_completions_() method.
1633 1633 method = get_real_method(obj, '_ipython_key_completions_')
1634 1634 if method is not None:
1635 1635 return method()
1636 1636
1637 1637 # Special case some common in-memory dict-like types
1638 1638 if isinstance(obj, dict) or\
1639 1639 _safe_isinstance(obj, 'pandas', 'DataFrame'):
1640 1640 try:
1641 1641 return list(obj.keys())
1642 1642 except Exception:
1643 1643 return []
1644 1644 elif _safe_isinstance(obj, 'numpy', 'ndarray') or\
1645 1645 _safe_isinstance(obj, 'numpy', 'void'):
1646 1646 return obj.dtype.names or []
1647 1647 return []
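The first branch above is the documented opt-in protocol: any object can supply its own key completions. A minimal hedged sketch (the class and its data are made up)::

    class LazyStore:
        """Hypothetical mapping-like object opting into key completion."""
        def __init__(self):
            self._data = {'alpha': 1, 'beta': 2}

        def __getitem__(self, key):
            return self._data[key]

        def _ipython_key_completions_(self):
            # called by IPython when completing ``store["<tab>``
            return list(self._data)

    store = LazyStore()
    # in an IPython session, ``store["<tab>`` would offer 'alpha' and 'beta'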
1648 1648
1649 1649 def dict_key_matches(self, text:str) -> List[str]:
1650 1650 "Match string keys in a dictionary, after e.g. 'foo[' "
1651 1651
1652 1652
1653 1653 if self.__dict_key_regexps is not None:
1654 1654 regexps = self.__dict_key_regexps
1655 1655 else:
1656 1656 dict_key_re_fmt = r'''(?x)
1657 1657 ( # match dict-referring expression wrt greedy setting
1658 1658 %s
1659 1659 )
1660 1660 \[ # open bracket
1661 1661 \s* # and optional whitespace
1662 1662 # Capture any number of str-like objects (e.g. "a", "b", 'c')
1663 1663 ((?:[uUbB]? # string prefix (r not handled)
1664 1664 (?:
1665 1665 '(?:[^']|(?<!\\)\\')*'
1666 1666 |
1667 1667 "(?:[^"]|(?<!\\)\\")*"
1668 1668 )
1669 1669 \s*,\s*
1670 1670 )*)
1671 1671 ([uUbB]? # string prefix (r not handled)
1672 1672 (?: # unclosed string
1673 1673 '(?:[^']|(?<!\\)\\')*
1674 1674 |
1675 1675 "(?:[^"]|(?<!\\)\\")*
1676 1676 )
1677 1677 )?
1678 1678 $
1679 1679 '''
1680 1680 regexps = self.__dict_key_regexps = {
1681 1681 False: re.compile(dict_key_re_fmt % r'''
1682 1682 # identifiers separated by .
1683 1683 (?!\d)\w+
1684 1684 (?:\.(?!\d)\w+)*
1685 1685 '''),
1686 1686 True: re.compile(dict_key_re_fmt % '''
1687 1687 .+
1688 1688 ''')
1689 1689 }
1690 1690
1691 1691 match = regexps[self.greedy].search(self.text_until_cursor)
1692 1692
1693 1693 if match is None:
1694 1694 return []
1695 1695
1696 1696 expr, prefix0, prefix = match.groups()
1697 1697 try:
1698 1698 obj = eval(expr, self.namespace)
1699 1699 except Exception:
1700 1700 try:
1701 1701 obj = eval(expr, self.global_namespace)
1702 1702 except Exception:
1703 1703 return []
1704 1704
1705 1705 keys = self._get_keys(obj)
1706 1706 if not keys:
1707 1707 return keys
1708 1708
1709 1709 extra_prefix = eval(prefix0) if prefix0 != '' else None
1710 1710
1711 1711 closing_quote, token_offset, matches = match_dict_keys(keys, prefix, self.splitter.delims, extra_prefix=extra_prefix)
1712 1712 if not matches:
1713 1713 return matches
1714 1714
1715 1715 # get the cursor position of
1716 1716 # - the text being completed
1717 1717 # - the start of the key text
1718 1718 # - the start of the completion
1719 1719 text_start = len(self.text_until_cursor) - len(text)
1720 1720 if prefix:
1721 1721 key_start = match.start(3)
1722 1722 completion_start = key_start + token_offset
1723 1723 else:
1724 1724 key_start = completion_start = match.end()
1725 1725
1726 1726 # grab the leading prefix, to make sure all completions start with `text`
1727 1727 if text_start > key_start:
1728 1728 leading = ''
1729 1729 else:
1730 1730 leading = text[text_start:completion_start]
1731 1731
1732 1732 # the index of the `[` character
1733 1733 bracket_idx = match.end(1)
1734 1734
1735 1735 # append closing quote and bracket as appropriate
1736 1736 # this is *not* appropriate if the opening quote or bracket is outside
1737 1737 # the text given to this method
1738 1738 suf = ''
1739 1739 continuation = self.line_buffer[len(self.text_until_cursor):]
1740 1740 if key_start > text_start and closing_quote:
1741 1741 # quotes were opened inside text, maybe close them
1742 1742 if continuation.startswith(closing_quote):
1743 1743 continuation = continuation[len(closing_quote):]
1744 1744 else:
1745 1745 suf += closing_quote
1746 1746 if bracket_idx > text_start:
1747 1747 # brackets were opened inside text, maybe close them
1748 1748 if not continuation.startswith(']'):
1749 1749 suf += ']'
1750 1750
1751 1751 return [leading + k + suf for k in matches]
1752 1752
1753 1753 @staticmethod
1754 1754 def unicode_name_matches(text:str) -> Tuple[str, List[str]] :
1755 1755 """Match Latex-like syntax for unicode characters base
1756 1756 on the name of the character.
1757 1757
1758 1758 This does ``\\GREEK SMALL LETTER ETA`` -> ``η``
1759 1759
1760 1760 Works only on valid python 3 identifiers, or on combining characters that
1761 1761 will combine to form a valid identifier.
1762 1762 """
1763 1763 slashpos = text.rfind('\\')
1764 1764 if slashpos > -1:
1765 1765 s = text[slashpos+1:]
1766 1766 try :
1767 1767 unic = unicodedata.lookup(s)
1768 1768 # allow combining chars
1769 1769 if ('a'+unic).isidentifier():
1770 1770 return '\\'+s,[unic]
1771 1771 except KeyError:
1772 1772 pass
1773 1773 return '', []
1774 1774
1775 1775
1776 1776 def latex_matches(self, text:str) -> Tuple[str, Sequence[str]]:
1777 1777 """Match Latex syntax for unicode characters.
1778 1778
1779 1779 This does both ``\\alp`` -> ``\\alpha`` and ``\\alpha`` -> ``α``
1780 1780 """
1781 1781 slashpos = text.rfind('\\')
1782 1782 if slashpos > -1:
1783 1783 s = text[slashpos:]
1784 1784 if s in latex_symbols:
1785 1785 # Try to complete a full latex symbol to unicode
1786 1786 # \\alpha -> α
1787 1787 return s, [latex_symbols[s]]
1788 1788 else:
1789 1789 # If a user has partially typed a latex symbol, give them
1790 1790 # a full list of options \al -> [\aleph, \alpha]
1791 1791 matches = [k for k in latex_symbols if k.startswith(s)]
1792 1792 if matches:
1793 1793 return s, matches
1794 1794 return '', ()
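Both helpers can be probed directly. ``unicode_name_matches`` is a staticmethod; ``latex_matches`` needs a completer instance, assumed here to come from a running IPython session::

    from IPython.core.completer import IPCompleter

    print(IPCompleter.unicode_name_matches('\\GREEK SMALL LETTER ETA'))
    # ('\\GREEK SMALL LETTER ETA', ['η'])

    completer = get_ipython().Completer
    print(completer.latex_matches('\\alpha'))   # ('\\alpha', ['α'])
    print(completer.latex_matches('\\alp'))     # ('\\alp', [...all names starting with '\alp'])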
1795 1795
1796 1796 def dispatch_custom_completer(self, text):
1797 1797 if not self.custom_completers:
1798 1798 return
1799 1799
1800 1800 line = self.line_buffer
1801 1801 if not line.strip():
1802 1802 return None
1803 1803
1804 1804 # Create a little structure to pass all the relevant information about
1805 1805 # the current completion to any custom completer.
1806 1806 event = SimpleNamespace()
1807 1807 event.line = line
1808 1808 event.symbol = text
1809 1809 cmd = line.split(None,1)[0]
1810 1810 event.command = cmd
1811 1811 event.text_until_cursor = self.text_until_cursor
1812 1812
1813 1813 # for foo etc, try also to find completer for %foo
1814 1814 if not cmd.startswith(self.magic_escape):
1815 1815 try_magic = self.custom_completers.s_matches(
1816 1816 self.magic_escape + cmd)
1817 1817 else:
1818 1818 try_magic = []
1819 1819
1820 1820 for c in itertools.chain(self.custom_completers.s_matches(cmd),
1821 1821 try_magic,
1822 1822 self.custom_completers.flat_matches(self.text_until_cursor)):
1823 1823 try:
1824 1824 res = c(event)
1825 1825 if res:
1826 1826 # first, try case sensitive match
1827 1827 withcase = [r for r in res if r.startswith(text)]
1828 1828 if withcase:
1829 1829 return withcase
1830 1830 # if none, then case insensitive ones are ok too
1831 1831 text_low = text.lower()
1832 1832 return [r for r in res if r.lower().startswith(text_low)]
1833 1833 except TryNext:
1834 1834 pass
1835 1835 except KeyboardInterrupt:
1836 1836 """
1837 1837 If a custom completer takes too long,
1838 1838 let keyboard interrupt abort and return nothing.
1839 1839 """
1840 1840 break
1841 1841
1842 1842 return None
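Custom completers reach this dispatcher through the ``complete_command`` hook. A hedged registration sketch (the command and returned values are invented)::

    def branch_completer(self, event):
        # ``event`` is the SimpleNamespace built above; ``event.line``,
        # ``event.symbol``, ``event.command`` and ``event.text_until_cursor``
        # are all available here.
        return ['main', 'develop', 'feature/x']

    # assumes a running IPython session
    get_ipython().set_hook('complete_command', branch_completer, str_key='git')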
1843 1843
1844 1844 def completions(self, text: str, offset: int)->Iterator[Completion]:
1845 1845 """
1846 1846 Returns an iterator over the possible completions
1847 1847
1848 1848 .. warning::
1849 1849
1850 1850 Unstable
1851 1851
1852 1852 This function is unstable, API may change without warning.
1853 1853 It will also raise unless used in the proper context manager.
1854 1854
1855 1855 Parameters
1856 1856 ----------
1857 1857 text : str
1858 1858 Full text of the current input, multi line string.
1859 1859 offset : int
1860 1860 Integer representing the position of the cursor in ``text``. Offset
1861 1861 is 0-based.
1862 1862
1863 1863 Yields
1864 1864 ------
1865 1865 Completion
1866 1866
1867 1867 Notes
1868 1868 -----
1869 1869 The cursor in a text can either be seen as being "in between"
1870 1870 characters or "on" a character, depending on the interface visible to
1871 1871 the user. For consistency, the cursor being "in between" characters X
1872 1872 and Y is equivalent to the cursor being "on" character Y, that is to say
1873 1873 the character the cursor is on is considered as being after the cursor.
1874 1874
1875 1875 Combining characters may span more than one position in the
1876 1876 text.
1877 1877
1878 1878 .. note::
1879 1879
1880 1880 If ``IPCompleter.debug`` is :any:`True`, this will yield a ``--jedi/ipython--``
1881 1881 fake Completion token to distinguish completion returned by Jedi
1882 1882 and usual IPython completion.
1883 1883
1884 1884 .. note::
1885 1885
1886 1886 Completions are not completely deduplicated yet. If identical
1887 1887 completions are coming from different sources this function does not
1888 1888 ensure that each completion object will only be present once.
1889 1889 """
1890 1890 warnings.warn("_complete is a provisional API (as of IPython 6.0). "
1891 1891 "It may change without warnings. "
1892 1892 "Use in corresponding context manager.",
1893 1893 category=ProvisionalCompleterWarning, stacklevel=2)
1894 1894
1895 1895 seen = set()
1896 1896 profiler:Optional[cProfile.Profile]
1897 1897 try:
1898 1898 if self.profile_completions:
1899 1899 import cProfile
1900 1900 profiler = cProfile.Profile()
1901 1901 profiler.enable()
1902 1902 else:
1903 1903 profiler = None
1904 1904
1905 1905 for c in self._completions(text, offset, _timeout=self.jedi_compute_type_timeout/1000):
1906 1906 if c and (c in seen):
1907 1907 continue
1908 1908 yield c
1909 1909 seen.add(c)
1910 1910 except KeyboardInterrupt:
1911 1911 """if completions take too long and users send keyboard interrupt,
1912 1912 do not crash and return ASAP. """
1913 1913 pass
1914 1914 finally:
1915 1915 if profiler is not None:
1916 1916 profiler.disable()
1917 1917 ensure_dir_exists(self.profiler_output_dir)
1918 1918 output_path = os.path.join(self.profiler_output_dir, str(uuid.uuid4()))
1919 1919 print("Writing profiler output to", output_path)
1920 1920 profiler.dump_stats(output_path)
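Per the warning above, this is meant to be used inside the :any:`provisionalcompleter` guard. A minimal hedged usage sketch (the completions shown in the comment are indicative only)::

    from IPython.core.completer import provisionalcompleter

    ip = get_ipython()                   # assumes a running IPython session
    code = 'import os\nos.pa'
    with provisionalcompleter():
        for comp in ip.Completer.completions(code, len(code)):
            print(comp.text, comp.type)  # e.g. 'path module', among others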
1921 1921
1922 1922 def _completions(self, full_text: str, offset: int, *, _timeout) -> Iterator[Completion]:
1923 1923 """
1924 1924 Core completion module. Same signature as :any:`completions`, with the
1925 1925 extra `timeout` parameter (in seconds).
1926 1926
1927 1927 Computing jedi's completion ``.type`` can be quite expensive (it is a
1928 1928 lazy property) and can require some warm-up, more warm up than just
1929 1929 computing the ``name`` of a completion. The warm-up can be:
1930 1930
1931 1931 - Long warm-up the first time a module is encountered after
1932 1932 install/update: actually build parse/inference tree.
1933 1933
1934 1934 - first time the module is encountered in a session: load tree from
1935 1935 disk.
1936 1936
1937 1937 We don't want to block completions for tens of seconds so we give the
1938 1938 completer a "budget" of ``_timeout`` seconds per invocation to compute
1939 1939 completions types, the completions that have not yet been computed will
1940 1940 be marked as "unknown" and will have a chance to be computed next round
1941 1941 as things get cached.
1942 1942
1943 1943 Keep in mind that Jedi is not the only thing treating the completion, so
1944 1944 keep the timeout short-ish, as if we take more than 0.3 seconds we still
1945 1945 have lots of processing to do.
1946 1946
1947 1947 """
1948 1948 deadline = time.monotonic() + _timeout
1949 1949
1950 1950
1951 1951 before = full_text[:offset]
1952 1952 cursor_line, cursor_column = position_to_cursor(full_text, offset)
1953 1953
1954 1954 matched_text, matches, matches_origin, jedi_matches = self._complete(
1955 1955 full_text=full_text, cursor_line=cursor_line, cursor_pos=cursor_column)
1956 1956
1957 1957 iter_jm = iter(jedi_matches)
1958 1958 if _timeout:
1959 1959 for jm in iter_jm:
1960 1960 try:
1961 1961 type_ = jm.type
1962 1962 except Exception:
1963 1963 if self.debug:
1964 1964 print("Error in Jedi getting type of ", jm)
1965 1965 type_ = None
1966 1966 delta = len(jm.name_with_symbols) - len(jm.complete)
1967 1967 if type_ == 'function':
1968 1968 signature = _make_signature(jm)
1969 1969 else:
1970 1970 signature = ''
1971 1971 yield Completion(start=offset - delta,
1972 1972 end=offset,
1973 1973 text=jm.name_with_symbols,
1974 1974 type=type_,
1975 1975 signature=signature,
1976 1976 _origin='jedi')
1977 1977
1978 1978 if time.monotonic() > deadline:
1979 1979 break
1980 1980
1981 1981 for jm in iter_jm:
1982 1982 delta = len(jm.name_with_symbols) - len(jm.complete)
1983 1983 yield Completion(start=offset - delta,
1984 1984 end=offset,
1985 1985 text=jm.name_with_symbols,
1986 1986 type='<unknown>', # don't compute type for speed
1987 1987 _origin='jedi',
1988 1988 signature='')
1989 1989
1990 1990
1991 1991 start_offset = before.rfind(matched_text)
1992 1992
1993 1993 # TODO:
1994 1994 # Suppress this, right now just for debug.
1995 1995 if jedi_matches and matches and self.debug:
1996 1996 yield Completion(start=start_offset, end=offset, text='--jedi/ipython--',
1997 1997 _origin='debug', type='none', signature='')
1998 1998
1999 1999 # I'm unsure if this is always true, so let's assert and see if it
2000 2000 # crashes
2001 2001 assert before.endswith(matched_text)
2002 2002 for m, t in zip(matches, matches_origin):
2003 2003 yield Completion(start=start_offset, end=offset, text=m, _origin=t, signature='', type='<unknown>')
2004 2004
2005 2005
2006 2006 def complete(self, text=None, line_buffer=None, cursor_pos=None) -> Tuple[str, Sequence[str]]:
2007 2007 """Find completions for the given text and line context.
2008 2008
2009 2009 Note that both the text and the line_buffer are optional, but at least
2010 2010 one of them must be given.
2011 2011
2012 2012 Parameters
2013 2013 ----------
2014 2014 text : string, optional
2015 2015 Text to perform the completion on. If not given, the line buffer
2016 2016 is split using the instance's CompletionSplitter object.
2017 2017 line_buffer : string, optional
2018 2018 If not given, the completer attempts to obtain the current line
2019 2019 buffer via readline. This keyword allows clients which are
2020 2020 requesting text completions in non-readline contexts to inform
2021 2021 the completer of the entire text.
2022 2022 cursor_pos : int, optional
2023 2023 Index of the cursor in the full line buffer. Should be provided by
2024 2024 remote frontends where kernel has no access to frontend state.
2025 2025
2026 2026 Returns
2027 2027 -------
2028 2028 Tuple of two items:
2029 2029 text : str
2030 2030 Text that was actually used in the completion.
2031 2031 matches : list
2032 2032 A list of completion matches.
2033 2033
2034 2034 Notes
2035 2035 -----
2036 2036 This API is likely to be deprecated and replaced by
2037 2037 :any:`IPCompleter.completions` in the future.
2038 2038
2039 2039 """
2040 2040 warnings.warn('`Completer.complete` is pending deprecation since '
2041 2041 'IPython 6.0 and will be replaced by `Completer.completions`.',
2042 2042 PendingDeprecationWarning)
2043 2043 # potential todo, FOLD the 3rd throwaway argument of _complete
2044 2044 # into the first two.
2045 2045 return self._complete(line_buffer=line_buffer, cursor_pos=cursor_pos, text=text, cursor_line=0)[:2]
2046 2046
2047 2047 def _complete(self, *, cursor_line, cursor_pos, line_buffer=None, text=None,
2048 2048 full_text=None) -> _CompleteResult:
2049 2049 """
2050 2050 Like complete but can also return raw jedi completions as well as the
2051 2051 origin of the completion text. This could (and should) be made much
2052 2052 cleaner but that will be simpler once we drop the old (and stateful)
2053 2053 :any:`complete` API.
2054 2054
2055 2055 With the current provisional API, cursor_pos acts both (depending on the
2056 2056 caller) as the offset in the ``text`` or ``line_buffer``, or as the
2057 2057 ``column`` when passing multiline strings; this could/should be renamed
2058 2058 but would add extra noise.
2059 2059
2060 2060 Returns
2061 2061 -------
2062 2062 A tuple of N elements which are (likely):
2063 2063 matched_text: ? the text that the complete matched
2064 2064 matches: list of completions ?
2065 matches_origin: ? list same lenght as matches, and where each completion came from
2065 matches_origin: ? list same length as matches, and where each completion came from
2066 2066 jedi_matches: list of Jedi matches, has its own structure.
2067 2067 """
2068 2068
2069 2069
2070 2070 # if the cursor position isn't given, the only sane assumption we can
2071 2071 # make is that it's at the end of the line (the common case)
2072 2072 if cursor_pos is None:
2073 2073 cursor_pos = len(line_buffer) if text is None else len(text)
2074 2074
2075 2075 if self.use_main_ns:
2076 2076 self.namespace = __main__.__dict__
2077 2077
2078 2078 # if text is either None or an empty string, rely on the line buffer
2079 2079 if (not line_buffer) and full_text:
2080 2080 line_buffer = full_text.split('\n')[cursor_line]
2081 2081 if not text: # issue #11508: check line_buffer before calling split_line
2082 2082 text = self.splitter.split_line(line_buffer, cursor_pos) if line_buffer else ''
2083 2083
2084 2084 if self.backslash_combining_completions:
2085 2085 # allow deactivation of these on windows.
2086 2086 base_text = text if not line_buffer else line_buffer[:cursor_pos]
2087 2087
2088 2088 for meth in (self.latex_matches,
2089 2089 self.unicode_name_matches,
2090 2090 back_latex_name_matches,
2091 2091 back_unicode_name_matches,
2092 2092 self.fwd_unicode_match):
2093 2093 name_text, name_matches = meth(base_text)
2094 2094 if name_text:
2095 2095 return _CompleteResult(name_text, name_matches[:MATCHES_LIMIT], \
2096 2096 [meth.__qualname__]*min(len(name_matches), MATCHES_LIMIT), ())
2097 2097
2098 2098
2099 2099 # If no line buffer is given, assume the input text is all there was
2100 2100 if line_buffer is None:
2101 2101 line_buffer = text
2102 2102
2103 2103 self.line_buffer = line_buffer
2104 2104 self.text_until_cursor = self.line_buffer[:cursor_pos]
2105 2105
2106 2106 # Do magic arg matches
2107 2107 for matcher in self.magic_arg_matchers:
2108 2108 matches = list(matcher(line_buffer))[:MATCHES_LIMIT]
2109 2109 if matches:
2110 2110 origins = [matcher.__qualname__] * len(matches)
2111 2111 return _CompleteResult(text, matches, origins, ())
2112 2112
2113 2113 # Start with a clean slate of completions
2114 2114 matches = []
2115 2115
2116 2116 # FIXME: we should extend our api to return a dict with completions for
2117 2117 # different types of objects. The rlcomplete() method could then
2118 2118 # simply collapse the dict into a list for readline, but we'd have
2119 2119 # richer completion semantics in other environments.
2120 2120 completions:Iterable[Any] = []
2121 2121 if self.use_jedi:
2122 2122 if not full_text:
2123 2123 full_text = line_buffer
2124 2124 completions = self._jedi_matches(
2125 2125 cursor_pos, cursor_line, full_text)
2126 2126
2127 2127 if self.merge_completions:
2128 2128 matches = []
2129 2129 for matcher in self.matchers:
2130 2130 try:
2131 2131 matches.extend([(m, matcher.__qualname__)
2132 2132 for m in matcher(text)])
2133 2133 except:
2134 2134 # Show the ugly traceback if the matcher causes an
2135 2135 # exception, but do NOT crash the kernel!
2136 2136 sys.excepthook(*sys.exc_info())
2137 2137 else:
2138 2138 for matcher in self.matchers:
2139 2139 matches = [(m, matcher.__qualname__)
2140 2140 for m in matcher(text)]
2141 2141 if matches:
2142 2142 break
2143 2143
2144 2144 seen = set()
2145 2145 filtered_matches = set()
2146 2146 for m in matches:
2147 2147 t, c = m
2148 2148 if t not in seen:
2149 2149 filtered_matches.add(m)
2150 2150 seen.add(t)
2151 2151
2152 2152 _filtered_matches = sorted(filtered_matches, key=lambda x: completions_sorting_key(x[0]))
2153 2153
2154 2154 custom_res = [(m, 'custom') for m in self.dispatch_custom_completer(text) or []]
2155 2155
2156 2156 _filtered_matches = custom_res or _filtered_matches
2157 2157
2158 2158 _filtered_matches = _filtered_matches[:MATCHES_LIMIT]
2159 2159 _matches = [m[0] for m in _filtered_matches]
2160 2160 origins = [m[1] for m in _filtered_matches]
2161 2161
2162 2162 self.matches = _matches
2163 2163
2164 2164 return _CompleteResult(text, _matches, origins, completions)
2165 2165
2166 2166 def fwd_unicode_match(self, text:str) -> Tuple[str, Sequence[str]]:
2167 2167 """
2168 2168 Forward match a string starting with a backslash with a list of
2169 2169 potential Unicode completions.
2170 2170
2171 2171 Will compute the list of Unicode character names on first call and cache it.
2172 2172
2173 2173 Returns
2174 2174 -------
2175 2175 A tuple with:
2176 2176 - matched text (empty if no matches)
2177 2177 - list of potential completions (an empty tuple if there are none)
2178 2178 """
2179 2179 # TODO: self.unicode_names is here a list we traverse each time with ~100k elements.
2180 2180 # We could do a faster match using a Trie.
2181 2181
2182 # Using pygtrie the follwing seem to work:
2182 # Using pygtrie the following seem to work:
2183 2183
2184 2184 # s = PrefixSet()
2185 2185
2186 2186 # for c in range(0,0x10FFFF + 1):
2187 2187 # try:
2188 2188 # s.add(unicodedata.name(chr(c)))
2189 2189 # except ValueError:
2190 2190 # pass
2191 2191 # [''.join(k) for k in s.iter(prefix)]
2192 2192
2193 2193 # But need to be timed and adds an extra dependency.
2194 2194
2195 2195 slashpos = text.rfind('\\')
2196 2196 # if text starts with slash
2197 2197 if slashpos > -1:
2198 2198 # PERF: It's important that we don't access self._unicode_names
2199 2199 # until we're inside this if-block. _unicode_names is lazily
2200 2200 # initialized, and it takes a user-noticeable amount of time to
2201 2201 # initialize it, so we don't want to initialize it unless we're
2202 2202 # actually going to use it.
2203 2203 s = text[slashpos+1:]
2204 2204 candidates = [x for x in self.unicode_names if x.startswith(s)]
2205 2205 if candidates:
2206 2206 return s, candidates
2207 2207 else:
2208 2208 return '', ()
2209 2209
2210 2210 # if text does not start with slash
2211 2211 else:
2212 2212 return '', ()
2213 2213
2214 2214 @property
2215 2215 def unicode_names(self) -> List[str]:
2216 2216 """List of names of unicode code points that can be completed.
2217 2217
2218 2218 The list is lazily initialized on first access.
2219 2219 """
2220 2220 if self._unicode_names is None:
2221 2221 names = []
2222 2222 for c in range(0,0x10FFFF + 1):
2223 2223 try:
2224 2224 names.append(unicodedata.name(chr(c)))
2225 2225 except ValueError:
2226 2226 pass
2227 2227 self._unicode_names = _unicode_name_compute(_UNICODE_RANGES)
2228 2228
2229 2229 return self._unicode_names
2230 2230
2231 2231 def _unicode_name_compute(ranges:List[Tuple[int,int]]) -> List[str]:
2232 2232 names = []
2233 2233 for start,stop in ranges:
2234 2234 for c in range(start, stop) :
2235 2235 try:
2236 2236 names.append(unicodedata.name(chr(c)))
2237 2237 except ValueError:
2238 2238 pass
2239 2239 return names
@@ -1,1106 +1,1106 b''
1 1 # -*- coding: utf-8 -*-
2 2 """
3 3 Pdb debugger class.
4 4
5 5
6 6 This is an extension to PDB which adds a number of new features.
7 7 Note that there is also the `IPython.terminal.debugger` class which provides UI
8 8 improvements.
9 9
10 10 We also strongly recommend using this via the `ipdb` package, which provides
11 11 extra configuration options.
12 12
13 13 Among other things, this subclass of PDB:
14 14 - supports many IPython magics like pdef/psource
15 15 - hides frames in tracebacks based on `__tracebackhide__`
16 16 - allows skipping frames based on `__debuggerskip__`
17 17
18 18 The skipping and hiding of frames are configurable via the `skip_predicates`
19 19 command.
20 20
21 21 By default, frames from readonly files will be hidden, and frames containing
22 22 ``__tracebackhide__=True`` will be hidden.
23 23
24 24 Frames containing ``__debuggerskip__`` will be stepped over, and frames whose
25 25 parent frame's value of ``__debuggerskip__`` is ``True`` will be skipped.
26 26
27 27 >>> def helpers_helper():
28 28 ... pass
29 29 ...
30 30 ... def helper_1():
31 31 ... print("don't step in me")
32 32 ... helpers_helper()  # will be stepped over unless a breakpoint is set.
33 33 ...
34 34 ...
35 35 ... def helper_2():
36 36 ... print("in me neither")
37 37 ...
38 38
39 39 One can define a decorator that wraps a function between the two helpers:
40 40
41 41 >>> def pdb_skipped_decorator(function):
42 42 ...
43 43 ...
44 44 ... def wrapped_fn(*args, **kwargs):
45 45 ... __debuggerskip__ = True
46 46 ... helper_1()
47 47 ... __debuggerskip__ = False
48 48 ... result = function(*args, **kwargs)
49 49 ... __debuggerskip__ = True
50 50 ... helper_2()
51 51 ... # setting __debuggerskip__ to False again is not necessary
52 52 ... return result
53 53 ...
54 54 ... return wrapped_fn
55 55
56 56 When decorating a function, ipdb will directly step into ``bar()`` by
57 57 default:
58 58
59 59 >>> @pdb_skipped_decorator
60 60 ... def bar(x, y):
61 61 ... return x * y
62 62
63 63
64 64 You can toggle the behavior with
65 65
66 66 ipdb> skip_predicates debuggerskip false
67 67
68 68 or configure it in your ``.pdbrc``
69 69
70 70
71 71
72 Licencse
73 --------
72 License
73 -------
74 74
75 75 Modified from the standard pdb.Pdb class to avoid including readline, so that
76 76 the command line completion of other programs which include this isn't
77 77 damaged.
78 78
79 79 In the future, this class will be expanded with improvements over the standard
80 80 pdb.
81 81
82 82 The original code in this file is mainly lifted out of cmd.py in Python 2.2,
83 83 with minor changes. Licensing should therefore be under the standard Python
84 84 terms. For details on the PSF (Python Software Foundation) standard license,
85 85 see:
86 86
87 87 https://docs.python.org/2/license.html
88 88
89 89
90 90 All the changes since then are under the same license as IPython.
91 91
92 92 """
93 93
94 94 #*****************************************************************************
95 95 #
96 96 # This file is licensed under the PSF license.
97 97 #
98 98 # Copyright (C) 2001 Python Software Foundation, www.python.org
99 99 # Copyright (C) 2005-2006 Fernando Perez. <fperez@colorado.edu>
100 100 #
101 101 #
102 102 #*****************************************************************************
103 103
104 104 import bdb
105 105 import functools
106 106 import inspect
107 107 import linecache
108 108 import sys
109 109 import warnings
110 110 import re
111 111 import os
112 112
113 113 from IPython import get_ipython
114 114 from IPython.utils import PyColorize
115 115 from IPython.utils import coloransi, py3compat
116 116 from IPython.core.excolors import exception_colors
117 117 from IPython.testing.skipdoctest import skip_doctest
118 118
119 119
120 120 prompt = 'ipdb> '
121 121
122 122 # We have to check this directly from sys.argv, config struct not yet available
123 123 from pdb import Pdb as OldPdb
124 124
125 125 # Allow the set_trace code to operate outside of an ipython instance, even if
126 126 # it does so with some limitations. The rest of this support is implemented in
127 127 # the Tracer constructor.
128 128
129 129 DEBUGGERSKIP = "__debuggerskip__"
130 130
131 131
132 132 def make_arrow(pad):
133 133 """generate the leading arrow in front of traceback or debugger"""
134 134 if pad >= 2:
135 135 return '-'*(pad-2) + '> '
136 136 elif pad == 1:
137 137 return '>'
138 138 return ''
139 139
140 140
141 141 def BdbQuit_excepthook(et, ev, tb, excepthook=None):
142 142 """Exception hook which handles `BdbQuit` exceptions.
143 143
144 144 All other exceptions are processed using the `excepthook`
145 145 parameter.
146 146 """
147 147 warnings.warn("`BdbQuit_excepthook` is deprecated since version 5.1",
148 148 DeprecationWarning, stacklevel=2)
149 149 if et == bdb.BdbQuit:
150 150 print('Exiting Debugger.')
151 151 elif excepthook is not None:
152 152 excepthook(et, ev, tb)
153 153 else:
154 154 # Backwards compatibility. Raise deprecation warning?
155 155 BdbQuit_excepthook.excepthook_ori(et, ev, tb)
156 156
157 157
158 158 def BdbQuit_IPython_excepthook(self, et, ev, tb, tb_offset=None):
159 159 warnings.warn(
160 160 "`BdbQuit_IPython_excepthook` is deprecated since version 5.1",
161 161 DeprecationWarning, stacklevel=2)
162 162 print('Exiting Debugger.')
163 163
164 164
165 165 class Tracer(object):
166 166 """
167 167 DEPRECATED
168 168
169 169 Class for local debugging, similar to pdb.set_trace.
170 170
171 171 Instances of this class, when called, behave like pdb.set_trace, but
172 172 providing IPython's enhanced capabilities.
173 173
174 174 This is implemented as a class which must be initialized in your own code
175 175 and not as a standalone function because we need to detect at runtime
176 176 whether IPython is already active or not. That detection is done in the
177 177 constructor, ensuring that this code plays nicely with a running IPython,
178 178 while functioning acceptably (though with limitations) if outside of it.
179 179 """
180 180
181 181 @skip_doctest
182 182 def __init__(self, colors=None):
183 183 """
184 184 DEPRECATED
185 185
186 186 Create a local debugger instance.
187 187
188 188 Parameters
189 189 ----------
190 190 colors : str, optional
191 191 The name of the color scheme to use, it must be one of IPython's
192 192 valid color schemes. If not given, the function will default to
193 193 the current IPython scheme when running inside IPython, and to
194 194 'NoColor' otherwise.
195 195
196 196 Examples
197 197 --------
198 198 ::
199 199
200 200 from IPython.core.debugger import Tracer; debug_here = Tracer()
201 201
202 202 Later in your code::
203 203
204 204 debug_here() # -> will open up the debugger at that point.
205 205
206 206 Once the debugger activates, you can use all of its regular commands to
207 207 step through code, set breakpoints, etc. See the pdb documentation
208 208 from the Python standard library for usage details.
209 209 """
210 210 warnings.warn("`Tracer` is deprecated since version 5.1, directly use "
211 211 "`IPython.core.debugger.Pdb.set_trace()`",
212 212 DeprecationWarning, stacklevel=2)
213 213
214 214 ip = get_ipython()
215 215 if ip is None:
216 216 # Outside of ipython, we set our own exception hook manually
217 217 sys.excepthook = functools.partial(BdbQuit_excepthook,
218 218 excepthook=sys.excepthook)
219 219 def_colors = 'NoColor'
220 220 else:
221 221 # In ipython, we use its custom exception handler mechanism
222 222 def_colors = ip.colors
223 223 ip.set_custom_exc((bdb.BdbQuit,), BdbQuit_IPython_excepthook)
224 224
225 225 if colors is None:
226 226 colors = def_colors
227 227
228 228 # The stdlib debugger internally uses a modified repr from the `repr`
229 229 # module, that limits the length of printed strings to a hardcoded
230 230 # limit of 30 characters. That much trimming is too aggressive, let's
231 231 # at least raise that limit to 80 chars, which should be enough for
232 232 # most interactive uses.
233 233 try:
234 234 from reprlib import aRepr
235 235 aRepr.maxstring = 80
236 236 except:
237 237 # This is only a user-facing convenience, so any error we encounter
238 238 # here can be warned about but can be otherwise ignored. These
239 239 # printouts will tell us about problems if this API changes
240 240 import traceback
241 241 traceback.print_exc()
242 242
243 243 self.debugger = Pdb(colors)
244 244
245 245 def __call__(self):
246 246 """Starts an interactive debugger at the point where called.
247 247
248 248 This is similar to the pdb.set_trace() function from the std lib, but
249 249 using IPython's enhanced debugger."""
250 250
251 251 self.debugger.set_trace(sys._getframe().f_back)
252 252
253 253
254 254 RGX_EXTRA_INDENT = re.compile(r'(?<=\n)\s+')
255 255
256 256
257 257 def strip_indentation(multiline_string):
258 258 return RGX_EXTRA_INDENT.sub('', multiline_string)
259 259
260 260
261 261 def decorate_fn_with_doc(new_fn, old_fn, additional_text=""):
262 262 """Make new_fn have old_fn's doc string. This is particularly useful
263 263 for the ``do_...`` commands that hook into the help system.
264 264 Adapted from a comp.lang.python posting
265 265 by Duncan Booth."""
266 266 def wrapper(*args, **kw):
267 267 return new_fn(*args, **kw)
268 268 if old_fn.__doc__:
269 269 wrapper.__doc__ = strip_indentation(old_fn.__doc__) + additional_text
270 270 return wrapper
271 271
272 272
273 273 class Pdb(OldPdb):
274 274 """Modified Pdb class, does not load readline.
275 275
276 276 for a standalone version that uses prompt_toolkit, see
277 277 `IPython.terminal.debugger.TerminalPdb` and
278 278 `IPython.terminal.debugger.set_trace()`
279 279
280 280
281 281 This debugger can hide and skip frames that are tagged according to some predicates.
282 282 See the `skip_predicates` commands.
283 283
284 284 """
285 285
286 286 default_predicates = {
287 287 "tbhide": True,
288 288 "readonly": False,
289 289 "ipython_internal": True,
290 290 "debuggerskip": True,
291 291 }
292 292
293 293 def __init__(self, color_scheme=None, completekey=None,
294 294 stdin=None, stdout=None, context=5, **kwargs):
295 295 """Create a new IPython debugger.
296 296
297 297 Parameters
298 298 ----------
299 299 color_scheme : default None
300 300 Deprecated, do not use.
301 301 completekey : default None
302 302 Passed to pdb.Pdb.
303 303 stdin : default None
304 304 Passed to pdb.Pdb.
305 305 stdout : default None
306 306 Passed to pdb.Pdb.
307 307 context : int
308 308 Number of lines of source code context to show when
309 309 displaying stacktrace information.
310 310 **kwargs
311 311 Passed to pdb.Pdb.
312 312
313 313 Notes
314 314 -----
315 315 The possibilities are python version dependent, see the python
316 316 docs for more info.
317 317 """
318 318
319 319 # Parent constructor:
320 320 try:
321 321 self.context = int(context)
322 322 if self.context <= 0:
323 323 raise ValueError("Context must be a positive integer")
324 324 except (TypeError, ValueError) as e:
325 325 raise ValueError("Context must be a positive integer") from e
326 326
327 327 # `kwargs` ensures full compatibility with stdlib's `pdb.Pdb`.
328 328 OldPdb.__init__(self, completekey, stdin, stdout, **kwargs)
329 329
330 330 # IPython changes...
331 331 self.shell = get_ipython()
332 332
333 333 if self.shell is None:
334 334 save_main = sys.modules['__main__']
335 335 # No IPython instance running, we must create one
336 336 from IPython.terminal.interactiveshell import \
337 337 TerminalInteractiveShell
338 338 self.shell = TerminalInteractiveShell.instance()
339 339 # needed by any code which calls __import__("__main__") after
340 340 # the debugger was entered. See also #9941.
341 341 sys.modules["__main__"] = save_main
342 342
343 343 if color_scheme is not None:
344 344 warnings.warn(
345 345 "The `color_scheme` argument is deprecated since version 5.1",
346 346 DeprecationWarning, stacklevel=2)
347 347 else:
348 348 color_scheme = self.shell.colors
349 349
350 350 self.aliases = {}
351 351
352 352 # Create color table: we copy the default one from the traceback
353 353 # module and add a few attributes needed for debugging
354 354 self.color_scheme_table = exception_colors()
355 355
356 356 # shorthands
357 357 C = coloransi.TermColors
358 358 cst = self.color_scheme_table
359 359
360 360 cst['NoColor'].colors.prompt = C.NoColor
361 361 cst['NoColor'].colors.breakpoint_enabled = C.NoColor
362 362 cst['NoColor'].colors.breakpoint_disabled = C.NoColor
363 363
364 364 cst['Linux'].colors.prompt = C.Green
365 365 cst['Linux'].colors.breakpoint_enabled = C.LightRed
366 366 cst['Linux'].colors.breakpoint_disabled = C.Red
367 367
368 368 cst['LightBG'].colors.prompt = C.Blue
369 369 cst['LightBG'].colors.breakpoint_enabled = C.LightRed
370 370 cst['LightBG'].colors.breakpoint_disabled = C.Red
371 371
372 372 cst['Neutral'].colors.prompt = C.Blue
373 373 cst['Neutral'].colors.breakpoint_enabled = C.LightRed
374 374 cst['Neutral'].colors.breakpoint_disabled = C.Red
375 375
376 376 # Add a python parser so we can syntax highlight source while
377 377 # debugging.
378 378 self.parser = PyColorize.Parser(style=color_scheme)
379 379 self.set_colors(color_scheme)
380 380
381 381 # Set the prompt - the default prompt is '(Pdb)'
382 382 self.prompt = prompt
383 383 self.skip_hidden = True
384 384 self.report_skipped = True
385 385
386 386 # list of predicates we use to skip frames
387 387 self._predicates = self.default_predicates
388 388
389 389 #
390 390 def set_colors(self, scheme):
391 391 """Shorthand access to the color table scheme selector method."""
392 392 self.color_scheme_table.set_active_scheme(scheme)
393 393 self.parser.style = scheme
394 394
395 395 def set_trace(self, frame=None):
396 396 if frame is None:
397 397 frame = sys._getframe().f_back
398 398 self.initial_frame = frame
399 399 return super().set_trace(frame)
400 400
401 401 def _hidden_predicate(self, frame):
402 402 """
403 403 Given a frame, return whether it should be hidden or not by IPython.
404 404 """
405 405
406 406 if self._predicates["readonly"]:
407 407 fname = frame.f_code.co_filename
408 408 # we need to check for file existence; interactively defined
409 409 # functions would otherwise appear as read-only.
410 410 if os.path.isfile(fname) and not os.access(fname, os.W_OK):
411 411 return True
412 412
413 413 if self._predicates["tbhide"]:
414 414 if frame in (self.curframe, getattr(self, "initial_frame", None)):
415 415 return False
416 416 else:
417 417 return self._get_frame_locals(frame).get("__tracebackhide__", False)
418 418
419 419 return False
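A hedged illustration of the ``tbhide`` predicate from user code (the helpers below are invented): when the ``tbhide`` predicate is enabled (the default), any frame whose locals contain a truthy ``__tracebackhide__`` is treated as hidden::

    def internal_glue(callback):
        __tracebackhide__ = True     # hide this frame in ipdb / %debug
        return callback()

    def user_callback():
        raise ValueError("boom")

    # When post-mortem debugging ``internal_glue(user_callback)``, the
    # ``internal_glue`` frame is skipped by default (toggle with ``skip_hidden``).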
420 420
421 421 def hidden_frames(self, stack):
422 422 """
423 423 Given a stack, return for each frame whether it should be skipped.
424 424
425 425 This is used in up/down and where to skip frames.
426 426 """
427 427 # The f_locals dictionary is updated from the actual frame
428 428 # locals whenever the .f_locals accessor is called, so we
429 429 # avoid calling it here to preserve self.curframe_locals.
430 # Futhermore, there is no good reason to hide the current frame.
430 # Furthermore, there is no good reason to hide the current frame.
431 431 ip_hide = [self._hidden_predicate(s[0]) for s in stack]
432 432 ip_start = [i for i, s in enumerate(ip_hide) if s == "__ipython_bottom__"]
433 433 if ip_start and self._predicates["ipython_internal"]:
434 434 ip_hide = [h if i > ip_start[0] else True for (i, h) in enumerate(ip_hide)]
435 435 return ip_hide
436 436
437 437 def interaction(self, frame, traceback):
438 438 try:
439 439 OldPdb.interaction(self, frame, traceback)
440 440 except KeyboardInterrupt:
441 441 self.stdout.write("\n" + self.shell.get_exception_only())
442 442
443 443 def precmd(self, line):
444 444 """Perform useful escapes on the command before it is executed."""
445 445
446 446 if line.endswith("??"):
447 447 line = "pinfo2 " + line[:-2]
448 448 elif line.endswith("?"):
449 449 line = "pinfo " + line[:-1]
450 450
451 451 line = super().precmd(line)
452 452
453 453 return line
454 454
455 455 def new_do_frame(self, arg):
456 456 OldPdb.do_frame(self, arg)
457 457
458 458 def new_do_quit(self, arg):
459 459
460 460 if hasattr(self, 'old_all_completions'):
461 461 self.shell.Completer.all_completions = self.old_all_completions
462 462
463 463 return OldPdb.do_quit(self, arg)
464 464
465 465 do_q = do_quit = decorate_fn_with_doc(new_do_quit, OldPdb.do_quit)
466 466
467 467 def new_do_restart(self, arg):
468 468 """Restart command. In the context of ipython this is exactly the same
469 469 thing as 'quit'."""
470 470 self.msg("Restart doesn't make sense here. Using 'quit' instead.")
471 471 return self.do_quit(arg)
472 472
473 473 def print_stack_trace(self, context=None):
474 474 Colors = self.color_scheme_table.active_colors
475 475 ColorsNormal = Colors.Normal
476 476 if context is None:
477 477 context = self.context
478 478 try:
479 479 context = int(context)
480 480 if context <= 0:
481 481 raise ValueError("Context must be a positive integer")
482 482 except (TypeError, ValueError) as e:
483 483 raise ValueError("Context must be a positive integer") from e
484 484 try:
485 485 skipped = 0
486 486 for hidden, frame_lineno in zip(self.hidden_frames(self.stack), self.stack):
487 487 if hidden and self.skip_hidden:
488 488 skipped += 1
489 489 continue
490 490 if skipped:
491 491 print(
492 492 f"{Colors.excName} [... skipping {skipped} hidden frame(s)]{ColorsNormal}\n"
493 493 )
494 494 skipped = 0
495 495 self.print_stack_entry(frame_lineno, context=context)
496 496 if skipped:
497 497 print(
498 498 f"{Colors.excName} [... skipping {skipped} hidden frame(s)]{ColorsNormal}\n"
499 499 )
500 500 except KeyboardInterrupt:
501 501 pass
502 502
503 503 def print_stack_entry(self, frame_lineno, prompt_prefix='\n-> ',
504 504 context=None):
505 505 if context is None:
506 506 context = self.context
507 507 try:
508 508 context = int(context)
509 509 if context <= 0:
510 510 raise ValueError("Context must be a positive integer")
511 511 except (TypeError, ValueError) as e:
512 512 raise ValueError("Context must be a positive integer") from e
513 513 print(self.format_stack_entry(frame_lineno, '', context), file=self.stdout)
514 514
515 515 # vds: >>
516 516 frame, lineno = frame_lineno
517 517 filename = frame.f_code.co_filename
518 518 self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
519 519 # vds: <<
520 520
521 521 def _get_frame_locals(self, frame):
522 522 """ "
523 Acessing f_local of current frame reset the namespace, so we want to avoid
524 that or the following can happend
523 Accessing f_local of current frame reset the namespace, so we want to avoid
524 that or the following can happen
525 525
526 526 ipdb> foo
527 527 "old"
528 528 ipdb> foo = "new"
529 529 ipdb> foo
530 530 "new"
531 531 ipdb> where
532 532 ipdb> foo
533 533 "old"
534 534
535 535 So if frame is self.curframe we instead return self.curframe_locals.
536 536
537 537 """
538 538 if frame is self.curframe:
539 539 return self.curframe_locals
540 540 else:
541 541 return frame.f_locals
542 542
543 543 def format_stack_entry(self, frame_lineno, lprefix=': ', context=None):
544 544 if context is None:
545 545 context = self.context
546 546 try:
547 547 context = int(context)
548 548 if context <= 0:
549 549 print("Context must be a positive integer", file=self.stdout)
550 550 except (TypeError, ValueError):
551 551 print("Context must be a positive integer", file=self.stdout)
552 552
553 553 import reprlib
554 554
555 555 ret = []
556 556
557 557 Colors = self.color_scheme_table.active_colors
558 558 ColorsNormal = Colors.Normal
559 559 tpl_link = "%s%%s%s" % (Colors.filenameEm, ColorsNormal)
560 560 tpl_call = "%s%%s%s%%s%s" % (Colors.vName, Colors.valEm, ColorsNormal)
561 561 tpl_line = "%%s%s%%s %s%%s" % (Colors.lineno, ColorsNormal)
562 562 tpl_line_em = "%%s%s%%s %s%%s%s" % (Colors.linenoEm, Colors.line, ColorsNormal)
563 563
564 564 frame, lineno = frame_lineno
565 565
566 566 return_value = ''
567 567 loc_frame = self._get_frame_locals(frame)
568 568 if "__return__" in loc_frame:
569 569 rv = loc_frame["__return__"]
570 570 # return_value += '->'
571 571 return_value += reprlib.repr(rv) + "\n"
572 572 ret.append(return_value)
573 573
574 574 #s = filename + '(' + `lineno` + ')'
575 575 filename = self.canonic(frame.f_code.co_filename)
576 576 link = tpl_link % py3compat.cast_unicode(filename)
577 577
578 578 if frame.f_code.co_name:
579 579 func = frame.f_code.co_name
580 580 else:
581 581 func = "<lambda>"
582 582
583 583 call = ""
584 584 if func != "?":
585 585 if "__args__" in loc_frame:
586 586 args = reprlib.repr(loc_frame["__args__"])
587 587 else:
588 588 args = '()'
589 589 call = tpl_call % (func, args)
590 590
591 591 # The level info should be generated in the same format pdb uses, to
592 592 # avoid breaking the pdbtrack functionality of python-mode in *emacs.
593 593 if frame is self.curframe:
594 594 ret.append('> ')
595 595 else:
596 596 ret.append(" ")
597 597 ret.append("%s(%s)%s\n" % (link, lineno, call))
598 598
599 599 start = lineno - 1 - context//2
600 600 lines = linecache.getlines(filename)
601 601 start = min(start, len(lines) - context)
602 602 start = max(start, 0)
603 603 lines = lines[start : start + context]
604 604
605 605 for i, line in enumerate(lines):
606 606 show_arrow = start + 1 + i == lineno
607 607 linetpl = (frame is self.curframe or show_arrow) and tpl_line_em or tpl_line
608 608 ret.append(
609 609 self.__format_line(
610 610 linetpl, filename, start + 1 + i, line, arrow=show_arrow
611 611 )
612 612 )
613 613 return "".join(ret)
614 614
615 615 def __format_line(self, tpl_line, filename, lineno, line, arrow=False):
616 616 bp_mark = ""
617 617 bp_mark_color = ""
618 618
619 619 new_line, err = self.parser.format2(line, 'str')
620 620 if not err:
621 621 line = new_line
622 622
623 623 bp = None
624 624 if lineno in self.get_file_breaks(filename):
625 625 bps = self.get_breaks(filename, lineno)
626 626 bp = bps[-1]
627 627
628 628 if bp:
629 629 Colors = self.color_scheme_table.active_colors
630 630 bp_mark = str(bp.number)
631 631 bp_mark_color = Colors.breakpoint_enabled
632 632 if not bp.enabled:
633 633 bp_mark_color = Colors.breakpoint_disabled
634 634
635 635 numbers_width = 7
636 636 if arrow:
637 637 # This is the line with the error
638 638 pad = numbers_width - len(str(lineno)) - len(bp_mark)
639 639 num = '%s%s' % (make_arrow(pad), str(lineno))
640 640 else:
641 641 num = '%*s' % (numbers_width - len(bp_mark), str(lineno))
642 642
643 643 return tpl_line % (bp_mark_color + bp_mark, num, line)
644 644
645 645 def print_list_lines(self, filename, first, last):
646 646 """The printing (as opposed to the parsing part of a 'list'
647 647 command."""
648 648 try:
649 649 Colors = self.color_scheme_table.active_colors
650 650 ColorsNormal = Colors.Normal
651 651 tpl_line = '%%s%s%%s %s%%s' % (Colors.lineno, ColorsNormal)
652 652 tpl_line_em = '%%s%s%%s %s%%s%s' % (Colors.linenoEm, Colors.line, ColorsNormal)
653 653 src = []
654 654 if filename == "<string>" and hasattr(self, "_exec_filename"):
655 655 filename = self._exec_filename
656 656
657 657 for lineno in range(first, last+1):
658 658 line = linecache.getline(filename, lineno)
659 659 if not line:
660 660 break
661 661
662 662 if lineno == self.curframe.f_lineno:
663 663 line = self.__format_line(
664 664 tpl_line_em, filename, lineno, line, arrow=True
665 665 )
666 666 else:
667 667 line = self.__format_line(
668 668 tpl_line, filename, lineno, line, arrow=False
669 669 )
670 670
671 671 src.append(line)
672 672 self.lineno = lineno
673 673
674 674 print(''.join(src), file=self.stdout)
675 675
676 676 except KeyboardInterrupt:
677 677 pass
678 678
679 679 def do_skip_predicates(self, args):
680 680 """
681 681 Turn on/off individual predicates as to whether a frame should be hidden/skipped.
682 682
683 683 The global option to skip (or not) hidden frames is set with skip_hidden
684 684
685 685 To change the value of a predicate
686 686
687 687 skip_predicates key [true|false]
688 688
689 689 Call without arguments to see the current values.
690 690
691 691 To permanently change the value of an option add the corresponding
692 692 command to your ``~/.pdbrc`` file. If you are programmatically using the
693 693 Pdb instance you can also change the ``default_predicates`` class
694 694 attribute.
695 695 """
696 696 if not args.strip():
697 697 print("current predicates:")
698 698 for (p, v) in self._predicates.items():
699 699 print(" ", p, ":", v)
700 700 return
701 701 type_value = args.strip().split(" ")
702 702 if len(type_value) != 2:
703 703 print(
704 704 f"Usage: skip_predicates <type> <value>, with <type> one of {set(self._predicates.keys())}"
705 705 )
706 706 return
707 707
708 708 type_, value = type_value
709 709 if type_ not in self._predicates:
710 710 print(f"{type_!r} not in {set(self._predicates.keys())}")
711 711 return
712 712 if value.lower() not in ("true", "yes", "1", "no", "false", "0"):
713 713 print(
714 714 f"{value!r} is invalid - use one of ('true', 'yes', '1', 'no', 'false', '0')"
715 715 )
716 716 return
717 717
718 718 self._predicates[type_] = value.lower() in ("true", "yes", "1")
719 719 if not any(self._predicates.values()):
720 720 print(
721 721 "Warning, all predicates set to False, skip_hidden may not have any effects."
722 722 )
723 723
724 724 def do_skip_hidden(self, arg):
725 725 """
726 726 Change whether or not we should skip frames with the
727 727 __tracebackhide__ attribute.
728 728 """
729 729 if not arg.strip():
730 730 print(
731 731 f"skip_hidden = {self.skip_hidden}, use 'yes','no', 'true', or 'false' to change."
732 732 )
733 733 elif arg.strip().lower() in ("true", "yes"):
734 734 self.skip_hidden = True
735 735 elif arg.strip().lower() in ("false", "no"):
736 736 self.skip_hidden = False
737 737 if not any(self._predicates.values()):
738 738 print(
739 739 "Warning, all predicates set to False, skip_hidden may not have any effects."
740 740 )
741 741
742 742 def do_list(self, arg):
743 743 """Print lines of code from the current stack frame
744 744 """
745 745 self.lastcmd = 'list'
746 746 last = None
747 747 if arg:
748 748 try:
749 749 x = eval(arg, {}, {})
750 750 if type(x) == type(()):
751 751 first, last = x
752 752 first = int(first)
753 753 last = int(last)
754 754 if last < first:
755 755 # Assume it's a count
756 756 last = first + last
757 757 else:
758 758 first = max(1, int(x) - 5)
759 759 except:
760 760 print('*** Error in argument:', repr(arg), file=self.stdout)
761 761 return
762 762 elif self.lineno is None:
763 763 first = max(1, self.curframe.f_lineno - 5)
764 764 else:
765 765 first = self.lineno + 1
766 766 if last is None:
767 767 last = first + 10
768 768 self.print_list_lines(self.curframe.f_code.co_filename, first, last)
769 769
770 770 # vds: >>
771 771 lineno = first
772 772 filename = self.curframe.f_code.co_filename
773 773 self.shell.hooks.synchronize_with_editor(filename, lineno, 0)
774 774 # vds: <<
775 775
776 776 do_l = do_list
777 777
778 778 def getsourcelines(self, obj):
779 779 lines, lineno = inspect.findsource(obj)
780 780 if inspect.isframe(obj) and obj.f_globals is self._get_frame_locals(obj):
781 781 # must be a module frame: do not try to cut a block out of it
782 782 return lines, 1
783 783 elif inspect.ismodule(obj):
784 784 return lines, 1
785 785 return inspect.getblock(lines[lineno:]), lineno+1
786 786
787 787 def do_longlist(self, arg):
788 788 """Print lines of code from the current stack frame.
789 789
790 790 Shows more lines than 'list' does.
791 791 """
792 792 self.lastcmd = 'longlist'
793 793 try:
794 794 lines, lineno = self.getsourcelines(self.curframe)
795 795 except OSError as err:
796 796 self.error(err)
797 797 return
798 798 last = lineno + len(lines)
799 799 self.print_list_lines(self.curframe.f_code.co_filename, lineno, last)
800 800 do_ll = do_longlist
801 801
802 802 def do_debug(self, arg):
803 803 """debug code
804 804 Enter a recursive debugger that steps through the code
805 805 argument (which is an arbitrary expression or statement to be
806 806 executed in the current environment).
807 807 """
808 808 trace_function = sys.gettrace()
809 809 sys.settrace(None)
810 810 globals = self.curframe.f_globals
811 811 locals = self.curframe_locals
812 812 p = self.__class__(completekey=self.completekey,
813 813 stdin=self.stdin, stdout=self.stdout)
814 814 p.use_rawinput = self.use_rawinput
815 815 p.prompt = "(%s) " % self.prompt.strip()
816 816 self.message("ENTERING RECURSIVE DEBUGGER")
817 817 sys.call_tracing(p.run, (arg, globals, locals))
818 818 self.message("LEAVING RECURSIVE DEBUGGER")
819 819 sys.settrace(trace_function)
820 820 self.lastcmd = p.lastcmd
821 821
822 822 def do_pdef(self, arg):
823 823 """Print the call signature for any callable object.
824 824
825 825 The debugger interface to %pdef"""
826 826 namespaces = [
827 827 ("Locals", self.curframe_locals),
828 828 ("Globals", self.curframe.f_globals),
829 829 ]
830 830 self.shell.find_line_magic("pdef")(arg, namespaces=namespaces)
831 831
832 832 def do_pdoc(self, arg):
833 833 """Print the docstring for an object.
834 834
835 835 The debugger interface to %pdoc."""
836 836 namespaces = [
837 837 ("Locals", self.curframe_locals),
838 838 ("Globals", self.curframe.f_globals),
839 839 ]
840 840 self.shell.find_line_magic("pdoc")(arg, namespaces=namespaces)
841 841
842 842 def do_pfile(self, arg):
843 843 """Print (or run through pager) the file where an object is defined.
844 844
845 845 The debugger interface to %pfile.
846 846 """
847 847 namespaces = [
848 848 ("Locals", self.curframe_locals),
849 849 ("Globals", self.curframe.f_globals),
850 850 ]
851 851 self.shell.find_line_magic("pfile")(arg, namespaces=namespaces)
852 852
853 853 def do_pinfo(self, arg):
854 854 """Provide detailed information about an object.
855 855
856 856 The debugger interface to %pinfo, i.e., obj?."""
857 857 namespaces = [
858 858 ("Locals", self.curframe_locals),
859 859 ("Globals", self.curframe.f_globals),
860 860 ]
861 861 self.shell.find_line_magic("pinfo")(arg, namespaces=namespaces)
862 862
863 863 def do_pinfo2(self, arg):
864 864 """Provide extra detailed information about an object.
865 865
866 866 The debugger interface to %pinfo2, i.e., obj??."""
867 867 namespaces = [
868 868 ("Locals", self.curframe_locals),
869 869 ("Globals", self.curframe.f_globals),
870 870 ]
871 871 self.shell.find_line_magic("pinfo2")(arg, namespaces=namespaces)
872 872
873 873 def do_psource(self, arg):
874 874 """Print (or run through pager) the source code for an object."""
875 875 namespaces = [
876 876 ("Locals", self.curframe_locals),
877 877 ("Globals", self.curframe.f_globals),
878 878 ]
879 879 self.shell.find_line_magic("psource")(arg, namespaces=namespaces)
880 880
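    # Usage sketch (hypothetical session): as with any cmd.Cmd subclass, the do_*
    # methods above are exposed at the debugger prompt under their bare names and
    # simply delegate to the corresponding IPython line magics:
    #
    #     ipdb> pdef some_function      # call signature
    #     ipdb> pdoc some_object        # docstring
    #     ipdb> pinfo some_object       # same as `some_object?`
    #     ipdb> psource some_object     # source code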
881 881 def do_where(self, arg):
882 882 """w(here)
883 883 Print a stack trace, with the most recent frame at the bottom.
884 884 An arrow indicates the "current frame", which determines the
885 885 context of most commands. 'bt' is an alias for this command.
886 886
887 887         Takes a number as an (optional) argument: the number of context lines to
888 888         print."""
889 889 if arg:
890 890 try:
891 891 context = int(arg)
892 892 except ValueError as err:
893 893 self.error(err)
894 894 return
895 895 self.print_stack_trace(context)
896 896 else:
897 897 self.print_stack_trace()
898 898
899 899 do_w = do_where
900 900
901 901 def break_anywhere(self, frame):
902 902 """
903 903
904 904         _stop_in_decorator_internals is overly restrictive, as we may still want
905 905         to trace function calls, so we also need to update break_anywhere so
906 906         that if we don't `stop_here` because of debugger skip, we may still
907 907         stop at any point inside the function.
908 908
909 909 """
910 910
911 911 sup = super().break_anywhere(frame)
912 912 if sup:
913 913 return sup
914 914 if self._predicates["debuggerskip"]:
915 915 if DEBUGGERSKIP in frame.f_code.co_varnames:
916 916 return True
917 917 if frame.f_back and self._get_frame_locals(frame.f_back).get(DEBUGGERSKIP):
918 918 return True
919 919 return False
920 920
921 921 @skip_doctest
922 922 def _is_in_decorator_internal_and_should_skip(self, frame):
923 923 """
924 924         Utility to tell us whether we are in a decorator internal and should skip.
925 925
926 926
927 927
928 928 """
929 929
930 930 # if we are disabled don't skip
931 931 if not self._predicates["debuggerskip"]:
932 932 return False
933 933
934 934 # if frame is tagged, skip by default.
935 935 if DEBUGGERSKIP in frame.f_code.co_varnames:
936 936 return True
937 937
938 938         # if one of the parent frames has this value set to True, skip as well.
939 939
940 940 cframe = frame
941 941 while getattr(cframe, "f_back", None):
942 942 cframe = cframe.f_back
943 943 if self._get_frame_locals(cframe).get(DEBUGGERSKIP):
944 944 return True
945 945
946 946 return False
947 947
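    # Sketch (assumption, not shown in this diff): DEBUGGERSKIP is the name of a
    # local variable (`__debuggerskip__` in current IPython), so a decorator can
    # opt its own plumbing out of single-stepping by declaring that local in its
    # wrapper; the predicate above finds it via frame.f_code.co_varnames and the
    # parent frames' locals:
    #
    #     def skip_internals(func):
    #         def wrapper(*args, **kwargs):
    #             __debuggerskip__ = True  # read back through co_varnames/f_locals
    #             return func(*args, **kwargs)
    #         return wrapper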
948 948 def stop_here(self, frame):
949 949
950 950 if self._is_in_decorator_internal_and_should_skip(frame) is True:
951 951 return False
952 952
953 953 hidden = False
954 954 if self.skip_hidden:
955 955 hidden = self._hidden_predicate(frame)
956 956 if hidden:
957 957 if self.report_skipped:
958 958 Colors = self.color_scheme_table.active_colors
959 959 ColorsNormal = Colors.Normal
960 960 print(
961 961 f"{Colors.excName} [... skipped 1 hidden frame]{ColorsNormal}\n"
962 962 )
963 963 return super().stop_here(frame)
964 964
965 965 def do_up(self, arg):
966 966 """u(p) [count]
967 967 Move the current frame count (default one) levels up in the
968 968 stack trace (to an older frame).
969 969
970 970 Will skip hidden frames.
971 971 """
972 972 # modified version of upstream that skips
973 973 # frames with __tracebackhide__
974 974 if self.curindex == 0:
975 975 self.error("Oldest frame")
976 976 return
977 977 try:
978 978 count = int(arg or 1)
979 979 except ValueError:
980 980 self.error("Invalid frame count (%s)" % arg)
981 981 return
982 982 skipped = 0
983 983 if count < 0:
984 984 _newframe = 0
985 985 else:
986 986 counter = 0
987 987 hidden_frames = self.hidden_frames(self.stack)
988 988 for i in range(self.curindex - 1, -1, -1):
989 989 if hidden_frames[i] and self.skip_hidden:
990 990 skipped += 1
991 991 continue
992 992 counter += 1
993 993 if counter >= count:
994 994 break
995 995 else:
996 # if no break occured.
996 # if no break occurred.
997 997 self.error(
998 998                     "all frames above hidden, use `skip_hidden False` to get into those."
999 999 )
1000 1000 return
1001 1001
1002 1002 Colors = self.color_scheme_table.active_colors
1003 1003 ColorsNormal = Colors.Normal
1004 1004 _newframe = i
1005 1005 self._select_frame(_newframe)
1006 1006 if skipped:
1007 1007 print(
1008 1008 f"{Colors.excName} [... skipped {skipped} hidden frame(s)]{ColorsNormal}\n"
1009 1009 )
1010 1010
1011 1011 def do_down(self, arg):
1012 1012 """d(own) [count]
1013 1013 Move the current frame count (default one) levels down in the
1014 1014 stack trace (to a newer frame).
1015 1015
1016 1016 Will skip hidden frames.
1017 1017 """
1018 1018 if self.curindex + 1 == len(self.stack):
1019 1019 self.error("Newest frame")
1020 1020 return
1021 1021 try:
1022 1022 count = int(arg or 1)
1023 1023 except ValueError:
1024 1024 self.error("Invalid frame count (%s)" % arg)
1025 1025 return
1026 1026 if count < 0:
1027 1027 _newframe = len(self.stack) - 1
1028 1028 else:
1029 1029 counter = 0
1030 1030 skipped = 0
1031 1031 hidden_frames = self.hidden_frames(self.stack)
1032 1032 for i in range(self.curindex + 1, len(self.stack)):
1033 1033 if hidden_frames[i] and self.skip_hidden:
1034 1034 skipped += 1
1035 1035 continue
1036 1036 counter += 1
1037 1037 if counter >= count:
1038 1038 break
1039 1039 else:
1040 1040 self.error(
1041 "all frames bellow hidden, use `skip_hidden False` to get get into those."
1041                     "all frames below hidden, use `skip_hidden False` to get into those."
1042 1042 )
1043 1043 return
1044 1044
1045 1045 Colors = self.color_scheme_table.active_colors
1046 1046 ColorsNormal = Colors.Normal
1047 1047 if skipped:
1048 1048 print(
1049 1049 f"{Colors.excName} [... skipped {skipped} hidden frame(s)]{ColorsNormal}\n"
1050 1050 )
1051 1051 _newframe = i
1052 1052
1053 1053 self._select_frame(_newframe)
1054 1054
1055 1055 do_d = do_down
1056 1056 do_u = do_up
1057 1057
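    # Sketch (assumption, matching the "__tracebackhide__" comment in do_up): a
    # helper can hide its frames from `up`/`down` and stack listings by setting
    # that local, unless the user runs `skip_hidden False`:
    #
    #     def internal_helper():
    #         __tracebackhide__ = True  # frame is skipped while skip_hidden is on
    #         raise ValueError("error raised on the user's behalf")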
1058 1058 def do_context(self, context):
1059 1059 """context number_of_lines
1060 1060 Set the number of lines of source code to show when displaying
1061 1061 stacktrace information.
1062 1062 """
1063 1063 try:
1064 1064 new_context = int(context)
1065 1065 if new_context <= 0:
1066 1066 raise ValueError()
1067 1067 self.context = new_context
1068 1068 except ValueError:
1069 1069 self.error("The 'context' command requires a positive integer argument.")
1070 1070
1071 1071
1072 1072 class InterruptiblePdb(Pdb):
1073 1073 """Version of debugger where KeyboardInterrupt exits the debugger altogether."""
1074 1074
1075 1075 def cmdloop(self, intro=None):
1076 1076 """Wrap cmdloop() such that KeyboardInterrupt stops the debugger."""
1077 1077 try:
1078 1078 return OldPdb.cmdloop(self, intro=intro)
1079 1079 except KeyboardInterrupt:
1080 1080 self.stop_here = lambda frame: False
1081 1081 self.do_quit("")
1082 1082 sys.settrace(None)
1083 1083 self.quitting = False
1084 1084 raise
1085 1085
1086 1086 def _cmdloop(self):
1087 1087 while True:
1088 1088 try:
1089 1089 # keyboard interrupts allow for an easy way to cancel
1090 1090 # the current command, so allow them during interactive input
1091 1091 self.allow_kbdint = True
1092 1092 self.cmdloop()
1093 1093 self.allow_kbdint = False
1094 1094 break
1095 1095 except KeyboardInterrupt:
1096 1096 self.message('--KeyboardInterrupt--')
1097 1097 raise
1098 1098
1099 1099
1100 1100 def set_trace(frame=None):
1101 1101 """
1102 1102 Start debugging from `frame`.
1103 1103
1104 1104 If frame is not specified, debugging starts from caller's frame.
1105 1105 """
1106 1106 Pdb().set_trace(frame or sys._getframe().f_back)
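# Usage sketch (assuming this module is importable as IPython.core.debugger, as
# the surrounding code suggests): drop a hard breakpoint into user code with
#
#     from IPython.core.debugger import set_trace
#
#     def compute(x):
#         set_trace()   # debugging starts from the caller's frame
#         return x * 2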
@@ -1,66 +1,66 b''
1 1 from IPython.utils.capture import capture_output
2 2
3 3 import pytest
4 4
5 5 def test_alias_lifecycle():
6 6 name = 'test_alias1'
7 7 cmd = 'echo "Hello"'
8 8 am = _ip.alias_manager
9 9 am.clear_aliases()
10 10 am.define_alias(name, cmd)
11 11 assert am.is_alias(name)
12 12 assert am.retrieve_alias(name) == cmd
13 13 assert (name, cmd) in am.aliases
14 14
15 15 # Test running the alias
16 16 orig_system = _ip.system
17 17 result = []
18 18 _ip.system = result.append
19 19 try:
20 20 _ip.run_cell('%{}'.format(name))
21 21 result = [c.strip() for c in result]
22 22 assert result == [cmd]
23 23 finally:
24 24 _ip.system = orig_system
25 25
26 26 # Test removing the alias
27 27 am.undefine_alias(name)
28 28 assert not am.is_alias(name)
29 29 with pytest.raises(ValueError):
30 30 am.retrieve_alias(name)
31 31 assert (name, cmd) not in am.aliases
32 32
33 33
34 34 def test_alias_args_error():
35 35 """Error expanding with wrong number of arguments"""
36 36 _ip.alias_manager.define_alias('parts', 'echo first %s second %s')
37 37 # capture stderr:
38 38 with capture_output() as cap:
39 39 _ip.run_cell('parts 1')
40 40
41 41 assert cap.stderr.split(":")[0] == "UsageError"
42 42
43 43
44 44 def test_alias_args_commented():
45 45 """Check that alias correctly ignores 'commented out' args"""
46 46 _ip.magic('alias commetarg echo this is %%s a commented out arg')
47 47
48 48 with capture_output() as cap:
49 49 _ip.run_cell('commetarg')
50 50
51 51     # strip() is for pytest compat; testing via iptest patches the IPython shell
52 # in testin.globalipapp and replace the system call which messed up the
52     # in testing.globalipapp and replaces the system call, which messes up the
53 53 # \r\n
54 54 assert cap.stdout.strip() == 'this is %s a commented out arg'
55 55
56 56 def test_alias_args_commented_nargs():
57 57 """Check that alias correctly counts args, excluding those commented out"""
58 58 am = _ip.alias_manager
59 59 alias_name = 'comargcount'
60 60 cmd = 'echo this is %%s a commented out arg and this is not %s'
61 61
62 62 am.define_alias(alias_name, cmd)
63 63 assert am.is_alias(alias_name)
64 64
65 65 thealias = am.get_alias(alias_name)
66 66 assert thealias.nargs == 1
@@ -1,1278 +1,1278 b''
1 1 # encoding: utf-8
2 2 """Tests for the IPython tab-completion machinery."""
3 3
4 4 # Copyright (c) IPython Development Team.
5 5 # Distributed under the terms of the Modified BSD License.
6 6
7 7 import os
8 8 import sys
9 9 import textwrap
10 10 import unittest
11 11
12 12 from contextlib import contextmanager
13 13
14 14 import nose.tools as nt
15 15
16 16 from traitlets.config.loader import Config
17 17 from IPython import get_ipython
18 18 from IPython.core import completer
19 19 from IPython.external import decorators
20 20 from IPython.utils.tempdir import TemporaryDirectory, TemporaryWorkingDirectory
21 21 from IPython.utils.generics import complete_object
22 22 from IPython.testing import decorators as dec
23 23
24 24 from IPython.core.completer import (
25 25 Completion,
26 26 provisionalcompleter,
27 27 match_dict_keys,
28 28 _deduplicate_completions,
29 29 )
30 30 from nose.tools import assert_in, assert_not_in
31 31
32 32 # -----------------------------------------------------------------------------
33 33 # Test functions
34 34 # -----------------------------------------------------------------------------
35 35
36 36 def recompute_unicode_ranges():
37 37 """
38 38     utility to recompute the largest unicode range that contains no named characters
39 39 
40 40     used to recompute the gap in the global _UNICODE_RANGES of completer.py
41 41 """
42 42 import itertools
43 43 import unicodedata
44 44 valid = []
45 45 for c in range(0,0x10FFFF + 1):
46 46 try:
47 47 unicodedata.name(chr(c))
48 48 except ValueError:
49 49 continue
50 50 valid.append(c)
51 51
52 52 def ranges(i):
53 53 for a, b in itertools.groupby(enumerate(i), lambda pair: pair[1] - pair[0]):
54 54 b = list(b)
55 55 yield b[0][1], b[-1][1]
56 56
57 57 rg = list(ranges(valid))
58 58 lens = []
59 59 gap_lens = []
60 60 pstart, pstop = 0,0
61 61 for start, stop in rg:
62 62 lens.append(stop-start)
63 63 gap_lens.append((start - pstop, hex(pstop), hex(start), f'{round((start - pstop)/0xe01f0*100)}%'))
64 64 pstart, pstop = start, stop
65 65
66 66 return sorted(gap_lens)[-1]
67 67
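# Usage sketch, per the docstring above (run by hand after a Unicode data update,
# not as part of the test suite, since it walks every code point): the returned
# tuple is (gap_length, hex(gap_start), hex(gap_end), percentage), which is how
# test_unicode_range below unpacks it.
#
#     size, start, stop, prct = recompute_unicode_ranges()
#     print(f"_UNICODE_RANGES = [(32, {start}), ({stop}, 0xe01f0)]")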
68 68
69 69
70 70 def test_unicode_range():
71 71 """
72 72 Test that the ranges we test for unicode names give the same number of
73 73     results as testing the full length.
74 74 """
75 75 from IPython.core.completer import _unicode_name_compute, _UNICODE_RANGES
76 76
77 77 expected_list = _unicode_name_compute([(0, 0x110000)])
78 78 test = _unicode_name_compute(_UNICODE_RANGES)
79 79 len_exp = len(expected_list)
80 80 len_test = len(test)
81 81
82 82 # do not inline the len() or on error pytest will try to print the 130 000 +
83 83 # elements.
84 84 message = None
85 85 if len_exp != len_test or len_exp > 131808:
86 86 size, start, stop, prct = recompute_unicode_ranges()
87 87         message = f"""_UNICODE_RANGES likely wrong and needs updating. This is
88 88         likely due to a new release of Python. We've found that the biggest gap
89 in unicode characters has reduces in size to be {size} charaters
89         in unicode characters has reduced in size to {size} characters
90 90         ({prct}), from {start} to {stop}. In completer.py, likely update to
91 91
92 92 _UNICODE_RANGES = [(32, {start}), ({stop}, 0xe01f0)]
93 93
94 94 And update the assertion below to use
95 95
96 96 len_exp <= {len_exp}
97 97 """
98 98 assert len_exp == len_test, message
99 99
100 100 # fail if new unicode symbols have been added.
101 101 assert len_exp <= 137714, message
102 102
103 103
104 104 @contextmanager
105 105 def greedy_completion():
106 106 ip = get_ipython()
107 107 greedy_original = ip.Completer.greedy
108 108 try:
109 109 ip.Completer.greedy = True
110 110 yield
111 111 finally:
112 112 ip.Completer.greedy = greedy_original
113 113
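# Usage sketch: the tests below use this to toggle greedy evaluation for a single
# completion call while guaranteeing the user's Completer.greedy setting is
# restored, e.g.
#
#     with greedy_completion():
#         _, matches = get_ipython().Completer.complete(line_buffer="get()['ab")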
114 114
115 115 def test_protect_filename():
116 116 if sys.platform == "win32":
117 117 pairs = [
118 118 ("abc", "abc"),
119 119 (" abc", '" abc"'),
120 120 ("a bc", '"a bc"'),
121 121 ("a bc", '"a bc"'),
122 122 (" bc", '" bc"'),
123 123 ]
124 124 else:
125 125 pairs = [
126 126 ("abc", "abc"),
127 127 (" abc", r"\ abc"),
128 128 ("a bc", r"a\ bc"),
129 129 ("a bc", r"a\ \ bc"),
130 130 (" bc", r"\ \ bc"),
131 131 # On posix, we also protect parens and other special characters.
132 132 ("a(bc", r"a\(bc"),
133 133 ("a)bc", r"a\)bc"),
134 134 ("a( )bc", r"a\(\ \)bc"),
135 135 ("a[1]bc", r"a\[1\]bc"),
136 136 ("a{1}bc", r"a\{1\}bc"),
137 137 ("a#bc", r"a\#bc"),
138 138 ("a?bc", r"a\?bc"),
139 139 ("a=bc", r"a\=bc"),
140 140 ("a\\bc", r"a\\bc"),
141 141 ("a|bc", r"a\|bc"),
142 142 ("a;bc", r"a\;bc"),
143 143 ("a:bc", r"a\:bc"),
144 144 ("a'bc", r"a\'bc"),
145 145 ("a*bc", r"a\*bc"),
146 146 ('a"bc', r"a\"bc"),
147 147 ("a^bc", r"a\^bc"),
148 148 ("a&bc", r"a\&bc"),
149 149 ]
150 150 # run the actual tests
151 151 for s1, s2 in pairs:
152 152 s1p = completer.protect_filename(s1)
153 153 nt.assert_equal(s1p, s2)
154 154
155 155
156 156 def check_line_split(splitter, test_specs):
157 157 for part1, part2, split in test_specs:
158 158 cursor_pos = len(part1)
159 159 line = part1 + part2
160 160 out = splitter.split_line(line, cursor_pos)
161 161 nt.assert_equal(out, split)
162 162
163 163
164 164 def test_line_split():
165 165 """Basic line splitter test with default specs."""
166 166 sp = completer.CompletionSplitter()
167 167 # The format of the test specs is: part1, part2, expected answer. Parts 1
168 168 # and 2 are joined into the 'line' sent to the splitter, as if the cursor
169 169 # was at the end of part1. So an empty part2 represents someone hitting
170 170 # tab at the end of the line, the most common case.
171 171 t = [
172 172 ("run some/scrip", "", "some/scrip"),
173 173 ("run scripts/er", "ror.py foo", "scripts/er"),
174 174 ("echo $HOM", "", "HOM"),
175 175 ("print sys.pa", "", "sys.pa"),
176 176 ("print(sys.pa", "", "sys.pa"),
177 177 ("execfile('scripts/er", "", "scripts/er"),
178 178 ("a[x.", "", "x."),
179 179 ("a[x.", "y", "x."),
180 180 ('cd "some_file/', "", "some_file/"),
181 181 ]
182 182 check_line_split(sp, t)
183 183 # Ensure splitting works OK with unicode by re-running the tests with
184 184 # all inputs turned into unicode
185 185 check_line_split(sp, [map(str, p) for p in t])
186 186
187 187
188 188 class NamedInstanceMetaclass(type):
189 189 def __getitem__(cls, item):
190 190 return cls.get_instance(item)
191 191
192 192
193 193 class NamedInstanceClass(metaclass=NamedInstanceMetaclass):
194 194 def __init__(self, name):
195 195 if not hasattr(self.__class__, "instances"):
196 196 self.__class__.instances = {}
197 197 self.__class__.instances[name] = self
198 198
199 199 @classmethod
200 200 def _ipython_key_completions_(cls):
201 201 return cls.instances.keys()
202 202
203 203 @classmethod
204 204 def get_instance(cls, name):
205 205 return cls.instances[name]
206 206
207 207
208 208 class KeyCompletable:
209 209 def __init__(self, things=()):
210 210 self.things = things
211 211
212 212 def _ipython_key_completions_(self):
213 213 return list(self.things)
214 214
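# The two classes above exercise IPython's key-completion hook: an object (or its
# metaclass) may define _ipython_key_completions_() returning candidate keys, and
# completing inside `obj['` offers them. A minimal sketch, mirroring
# test_object_key_completion further down:
#
#     ip = get_ipython()
#     ip.user_ns["key_completable"] = KeyCompletable(["qwerty", "qwick"])
#     _, matches = ip.Completer.complete(line_buffer="key_completable['qw")
#     assert "qwerty" in matches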
215 215
216 216 class TestCompleter(unittest.TestCase):
217 217 def setUp(self):
218 218 """
219 219 We want to silence all PendingDeprecationWarning when testing the completer
220 220 """
221 221 self._assertwarns = self.assertWarns(PendingDeprecationWarning)
222 222 self._assertwarns.__enter__()
223 223
224 224 def tearDown(self):
225 225 try:
226 226 self._assertwarns.__exit__(None, None, None)
227 227 except AssertionError:
228 228 pass
229 229
230 230 def test_custom_completion_error(self):
231 231 """Test that errors from custom attribute completers are silenced."""
232 232 ip = get_ipython()
233 233
234 234 class A:
235 235 pass
236 236
237 237 ip.user_ns["x"] = A()
238 238
239 239 @complete_object.register(A)
240 240 def complete_A(a, existing_completions):
241 241 raise TypeError("this should be silenced")
242 242
243 243 ip.complete("x.")
244 244
245 245 def test_custom_completion_ordering(self):
246 246 """Test that errors from custom attribute completers are silenced."""
247 247 ip = get_ipython()
248 248
249 249 _, matches = ip.complete('in')
250 250 assert matches.index('input') < matches.index('int')
251 251
252 252 def complete_example(a):
253 253 return ['example2', 'example1']
254 254
255 255 ip.Completer.custom_completers.add_re('ex*', complete_example)
256 256 _, matches = ip.complete('ex')
257 257 assert matches.index('example2') < matches.index('example1')
258 258
259 259 def test_unicode_completions(self):
260 260 ip = get_ipython()
261 261 # Some strings that trigger different types of completion. Check them both
262 262 # in str and unicode forms
263 263 s = ["ru", "%ru", "cd /", "floa", "float(x)/"]
264 264 for t in s + list(map(str, s)):
265 265 # We don't need to check exact completion values (they may change
266 266 # depending on the state of the namespace, but at least no exceptions
267 267 # should be thrown and the return value should be a pair of text, list
268 268 # values.
269 269 text, matches = ip.complete(t)
270 270 nt.assert_true(isinstance(text, str))
271 271 nt.assert_true(isinstance(matches, list))
272 272
273 273 def test_latex_completions(self):
274 274 from IPython.core.latex_symbols import latex_symbols
275 275 import random
276 276
277 277 ip = get_ipython()
278 278 # Test some random unicode symbols
279 279 keys = random.sample(latex_symbols.keys(), 10)
280 280 for k in keys:
281 281 text, matches = ip.complete(k)
282 282 nt.assert_equal(text, k)
283 283 nt.assert_equal(matches, [latex_symbols[k]])
284 284 # Test a more complex line
285 285 text, matches = ip.complete("print(\\alpha")
286 286 nt.assert_equal(text, "\\alpha")
287 287 nt.assert_equal(matches[0], latex_symbols["\\alpha"])
288 288 # Test multiple matching latex symbols
289 289 text, matches = ip.complete("\\al")
290 290 nt.assert_in("\\alpha", matches)
291 291 nt.assert_in("\\aleph", matches)
292 292
293 293 def test_latex_no_results(self):
294 294 """
295 295 forward latex should really return nothing in either field if nothing is found.
296 296 """
297 297 ip = get_ipython()
298 298 text, matches = ip.Completer.latex_matches("\\really_i_should_match_nothing")
299 299 nt.assert_equal(text, "")
300 300 nt.assert_equal(matches, ())
301 301
302 302 def test_back_latex_completion(self):
303 303 ip = get_ipython()
304 304
305 # do not return more than 1 matches fro \beta, only the latex one.
305         # do not return more than 1 match for \beta, only the latex one.
306 306 name, matches = ip.complete("\\β")
307 307 nt.assert_equal(matches, ['\\beta'])
308 308
309 309 def test_back_unicode_completion(self):
310 310 ip = get_ipython()
311 311
312 312 name, matches = ip.complete("\\Ⅴ")
313 313 nt.assert_equal(matches, ("\\ROMAN NUMERAL FIVE",))
314 314
315 315 def test_forward_unicode_completion(self):
316 316 ip = get_ipython()
317 317
318 318 name, matches = ip.complete("\\ROMAN NUMERAL FIVE")
319 319 nt.assert_equal(matches, ["Ⅴ"] ) # This is not a V
320 320 nt.assert_equal(matches, ["\u2164"] ) # same as above but explicit.
321 321
322 322 @nt.nottest # now we have a completion for \jmath
323 323 @decorators.knownfailureif(
324 324 sys.platform == "win32", "Fails if there is a C:\\j... path"
325 325 )
326 326 def test_no_ascii_back_completion(self):
327 327 ip = get_ipython()
328 328 with TemporaryWorkingDirectory(): # Avoid any filename completions
329 329             # single ascii letters that don't have completions yet
330 330 for letter in "jJ":
331 331 name, matches = ip.complete("\\" + letter)
332 332 nt.assert_equal(matches, [])
333 333
334 334 class CompletionSplitterTestCase(unittest.TestCase):
335 335 def setUp(self):
336 336 self.sp = completer.CompletionSplitter()
337 337
338 338 def test_delim_setting(self):
339 339 self.sp.delims = " "
340 340 nt.assert_equal(self.sp.delims, " ")
341 341 nt.assert_equal(self.sp._delim_expr, r"[\ ]")
342 342
343 343 def test_spaces(self):
344 344 """Test with only spaces as split chars."""
345 345 self.sp.delims = " "
346 346 t = [("foo", "", "foo"), ("run foo", "", "foo"), ("run foo", "bar", "foo")]
347 347 check_line_split(self.sp, t)
348 348
349 349 def test_has_open_quotes1(self):
350 350 for s in ["'", "'''", "'hi' '"]:
351 351 nt.assert_equal(completer.has_open_quotes(s), "'")
352 352
353 353 def test_has_open_quotes2(self):
354 354 for s in ['"', '"""', '"hi" "']:
355 355 nt.assert_equal(completer.has_open_quotes(s), '"')
356 356
357 357 def test_has_open_quotes3(self):
358 358 for s in ["''", "''' '''", "'hi' 'ipython'"]:
359 359 nt.assert_false(completer.has_open_quotes(s))
360 360
361 361 def test_has_open_quotes4(self):
362 362 for s in ['""', '""" """', '"hi" "ipython"']:
363 363 nt.assert_false(completer.has_open_quotes(s))
364 364
365 365 @decorators.knownfailureif(
366 366 sys.platform == "win32", "abspath completions fail on Windows"
367 367 )
368 368 def test_abspath_file_completions(self):
369 369 ip = get_ipython()
370 370 with TemporaryDirectory() as tmpdir:
371 371 prefix = os.path.join(tmpdir, "foo")
372 372 suffixes = ["1", "2"]
373 373 names = [prefix + s for s in suffixes]
374 374 for n in names:
375 375 open(n, "w").close()
376 376
377 377 # Check simple completion
378 378 c = ip.complete(prefix)[1]
379 379 nt.assert_equal(c, names)
380 380
381 381 # Now check with a function call
382 382 cmd = 'a = f("%s' % prefix
383 383 c = ip.complete(prefix, cmd)[1]
384 384 comp = [prefix + s for s in suffixes]
385 385 nt.assert_equal(c, comp)
386 386
387 387 def test_local_file_completions(self):
388 388 ip = get_ipython()
389 389 with TemporaryWorkingDirectory():
390 390 prefix = "./foo"
391 391 suffixes = ["1", "2"]
392 392 names = [prefix + s for s in suffixes]
393 393 for n in names:
394 394 open(n, "w").close()
395 395
396 396 # Check simple completion
397 397 c = ip.complete(prefix)[1]
398 398 nt.assert_equal(c, names)
399 399
400 400 # Now check with a function call
401 401 cmd = 'a = f("%s' % prefix
402 402 c = ip.complete(prefix, cmd)[1]
403 403 comp = {prefix + s for s in suffixes}
404 404 nt.assert_true(comp.issubset(set(c)))
405 405
406 406 def test_quoted_file_completions(self):
407 407 ip = get_ipython()
408 408 with TemporaryWorkingDirectory():
409 409 name = "foo'bar"
410 410 open(name, "w").close()
411 411
412 412 # Don't escape Windows
413 413 escaped = name if sys.platform == "win32" else "foo\\'bar"
414 414
415 415 # Single quote matches embedded single quote
416 416 text = "open('foo"
417 417 c = ip.Completer._complete(
418 418 cursor_line=0, cursor_pos=len(text), full_text=text
419 419 )[1]
420 420 nt.assert_equal(c, [escaped])
421 421
422 422 # Double quote requires no escape
423 423 text = 'open("foo'
424 424 c = ip.Completer._complete(
425 425 cursor_line=0, cursor_pos=len(text), full_text=text
426 426 )[1]
427 427 nt.assert_equal(c, [name])
428 428
429 429 # No quote requires an escape
430 430 text = "%ls foo"
431 431 c = ip.Completer._complete(
432 432 cursor_line=0, cursor_pos=len(text), full_text=text
433 433 )[1]
434 434 nt.assert_equal(c, [escaped])
435 435
436 436 def test_all_completions_dups(self):
437 437 """
438 438 Make sure the output of `IPCompleter.all_completions` does not have
439 439 duplicated prefixes.
440 440 """
441 441 ip = get_ipython()
442 442 c = ip.Completer
443 443 ip.ex("class TestClass():\n\ta=1\n\ta1=2")
444 444 for jedi_status in [True, False]:
445 445 with provisionalcompleter():
446 446 ip.Completer.use_jedi = jedi_status
447 447 matches = c.all_completions("TestCl")
448 448 assert matches == ['TestClass'], jedi_status
449 449 matches = c.all_completions("TestClass.")
450 450 assert len(matches) > 2, jedi_status
451 451 matches = c.all_completions("TestClass.a")
452 452 assert matches == ['TestClass.a', 'TestClass.a1'], jedi_status
453 453
454 454 def test_jedi(self):
455 455 """
456 456         A couple of issues we had with Jedi
457 457 """
458 458 ip = get_ipython()
459 459
460 460 def _test_complete(reason, s, comp, start=None, end=None):
461 461 l = len(s)
462 462 start = start if start is not None else l
463 463 end = end if end is not None else l
464 464 with provisionalcompleter():
465 465 ip.Completer.use_jedi = True
466 466 completions = set(ip.Completer.completions(s, l))
467 467 ip.Completer.use_jedi = False
468 468 assert_in(Completion(start, end, comp), completions, reason)
469 469
470 470 def _test_not_complete(reason, s, comp):
471 471 l = len(s)
472 472 with provisionalcompleter():
473 473 ip.Completer.use_jedi = True
474 474 completions = set(ip.Completer.completions(s, l))
475 475 ip.Completer.use_jedi = False
476 476 assert_not_in(Completion(l, l, comp), completions, reason)
477 477
478 478 import jedi
479 479
480 480 jedi_version = tuple(int(i) for i in jedi.__version__.split(".")[:3])
481 481 if jedi_version > (0, 10):
482 482 yield _test_complete, "jedi >0.9 should complete and not crash", "a=1;a.", "real"
483 483 yield _test_complete, "can infer first argument", 'a=(1,"foo");a[0].', "real"
484 484 yield _test_complete, "can infer second argument", 'a=(1,"foo");a[1].', "capitalize"
485 485 yield _test_complete, "cover duplicate completions", "im", "import", 0, 2
486 486
487 487 yield _test_not_complete, "does not mix types", 'a=(1,"foo");a[0].', "capitalize"
488 488
489 489 def test_completion_have_signature(self):
490 490 """
491 491         Let's make sure jedi is capable of pulling out the signature of the function we are completing.
492 492 """
493 493 ip = get_ipython()
494 494 with provisionalcompleter():
495 495 ip.Completer.use_jedi = True
496 496 completions = ip.Completer.completions("ope", 3)
497 497 c = next(completions) # should be `open`
498 498 ip.Completer.use_jedi = False
499 499 assert "file" in c.signature, "Signature of function was not found by completer"
500 500 assert (
501 501 "encoding" in c.signature
502 502 ), "Signature of function was not found by completer"
503 503
504 504 def test_deduplicate_completions(self):
505 505 """
506 506 Test that completions are correctly deduplicated (even if ranges are not the same)
507 507 """
508 508 ip = get_ipython()
509 509 ip.ex(
510 510 textwrap.dedent(
511 511 """
512 512 class Z:
513 513 zoo = 1
514 514 """
515 515 )
516 516 )
517 517 with provisionalcompleter():
518 518 ip.Completer.use_jedi = True
519 519 l = list(
520 520 _deduplicate_completions("Z.z", ip.Completer.completions("Z.z", 3))
521 521 )
522 522 ip.Completer.use_jedi = False
523 523
524 524 assert len(l) == 1, "Completions (Z.z<tab>) correctly deduplicate: %s " % l
525 525 assert l[0].text == "zoo" # and not `it.accumulate`
526 526
527 527 def test_greedy_completions(self):
528 528 """
529 529 Test the capability of the Greedy completer.
530 530
531 531         Most of the tests here do not really show off the greedy completer; as proof,
532 532         each of the cases below now passes with Jedi. The greedy completer is capable of more.
533 533
534 534 See the :any:`test_dict_key_completion_contexts`
535 535
536 536 """
537 537 ip = get_ipython()
538 538 ip.ex("a=list(range(5))")
539 539 _, c = ip.complete(".", line="a[0].")
540 540 nt.assert_false(".real" in c, "Shouldn't have completed on a[0]: %s" % c)
541 541
542 542 def _(line, cursor_pos, expect, message, completion):
543 543 with greedy_completion(), provisionalcompleter():
544 544 ip.Completer.use_jedi = False
545 545 _, c = ip.complete(".", line=line, cursor_pos=cursor_pos)
546 546 nt.assert_in(expect, c, message % c)
547 547
548 548 ip.Completer.use_jedi = True
549 549 with provisionalcompleter():
550 550 completions = ip.Completer.completions(line, cursor_pos)
551 551 nt.assert_in(completion, completions)
552 552
553 553 with provisionalcompleter():
554 554 yield _, "a[0].", 5, "a[0].real", "Should have completed on a[0].: %s", Completion(
555 555 5, 5, "real"
556 556 )
557 557 yield _, "a[0].r", 6, "a[0].real", "Should have completed on a[0].r: %s", Completion(
558 558 5, 6, "real"
559 559 )
560 560
561 561 yield _, "a[0].from_", 10, "a[0].from_bytes", "Should have completed on a[0].from_: %s", Completion(
562 562 5, 10, "from_bytes"
563 563 )
564 564
565 565 def test_omit__names(self):
566 566 # also happens to test IPCompleter as a configurable
567 567 ip = get_ipython()
568 568 ip._hidden_attr = 1
569 569 ip._x = {}
570 570 c = ip.Completer
571 571 ip.ex("ip=get_ipython()")
572 572 cfg = Config()
573 573 cfg.IPCompleter.omit__names = 0
574 574 c.update_config(cfg)
575 575 with provisionalcompleter():
576 576 c.use_jedi = False
577 577 s, matches = c.complete("ip.")
578 578 nt.assert_in("ip.__str__", matches)
579 579 nt.assert_in("ip._hidden_attr", matches)
580 580
581 581 # c.use_jedi = True
582 582 # completions = set(c.completions('ip.', 3))
583 583 # nt.assert_in(Completion(3, 3, '__str__'), completions)
584 584 # nt.assert_in(Completion(3,3, "_hidden_attr"), completions)
585 585
586 586 cfg = Config()
587 587 cfg.IPCompleter.omit__names = 1
588 588 c.update_config(cfg)
589 589 with provisionalcompleter():
590 590 c.use_jedi = False
591 591 s, matches = c.complete("ip.")
592 592 nt.assert_not_in("ip.__str__", matches)
593 593 # nt.assert_in('ip._hidden_attr', matches)
594 594
595 595 # c.use_jedi = True
596 596 # completions = set(c.completions('ip.', 3))
597 597 # nt.assert_not_in(Completion(3,3,'__str__'), completions)
598 598 # nt.assert_in(Completion(3,3, "_hidden_attr"), completions)
599 599
600 600 cfg = Config()
601 601 cfg.IPCompleter.omit__names = 2
602 602 c.update_config(cfg)
603 603 with provisionalcompleter():
604 604 c.use_jedi = False
605 605 s, matches = c.complete("ip.")
606 606 nt.assert_not_in("ip.__str__", matches)
607 607 nt.assert_not_in("ip._hidden_attr", matches)
608 608
609 609 # c.use_jedi = True
610 610 # completions = set(c.completions('ip.', 3))
611 611 # nt.assert_not_in(Completion(3,3,'__str__'), completions)
612 612 # nt.assert_not_in(Completion(3,3, "_hidden_attr"), completions)
613 613
614 614 with provisionalcompleter():
615 615 c.use_jedi = False
616 616 s, matches = c.complete("ip._x.")
617 617 nt.assert_in("ip._x.keys", matches)
618 618
619 619 # c.use_jedi = True
620 620 # completions = set(c.completions('ip._x.', 6))
621 621 # nt.assert_in(Completion(6,6, "keys"), completions)
622 622
623 623 del ip._hidden_attr
624 624 del ip._x
625 625
626 626 def test_limit_to__all__False_ok(self):
627 627 """
628 628 Limit to all is deprecated, once we remove it this test can go away.
629 629 """
630 630 ip = get_ipython()
631 631 c = ip.Completer
632 632 c.use_jedi = False
633 633 ip.ex("class D: x=24")
634 634 ip.ex("d=D()")
635 635 cfg = Config()
636 636 cfg.IPCompleter.limit_to__all__ = False
637 637 c.update_config(cfg)
638 638 s, matches = c.complete("d.")
639 639 nt.assert_in("d.x", matches)
640 640
641 641 def test_get__all__entries_ok(self):
642 642 class A:
643 643 __all__ = ["x", 1]
644 644
645 645 words = completer.get__all__entries(A())
646 646 nt.assert_equal(words, ["x"])
647 647
648 648 def test_get__all__entries_no__all__ok(self):
649 649 class A:
650 650 pass
651 651
652 652 words = completer.get__all__entries(A())
653 653 nt.assert_equal(words, [])
654 654
655 655 def test_func_kw_completions(self):
656 656 ip = get_ipython()
657 657 c = ip.Completer
658 658 c.use_jedi = False
659 659 ip.ex("def myfunc(a=1,b=2): return a+b")
660 660 s, matches = c.complete(None, "myfunc(1,b")
661 661 nt.assert_in("b=", matches)
662 662 # Simulate completing with cursor right after b (pos==10):
663 663 s, matches = c.complete(None, "myfunc(1,b)", 10)
664 664 nt.assert_in("b=", matches)
665 665 s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
666 666 nt.assert_in("b=", matches)
667 667 # builtin function
668 668 s, matches = c.complete(None, "min(k, k")
669 669 nt.assert_in("key=", matches)
670 670
671 671 def test_default_arguments_from_docstring(self):
672 672 ip = get_ipython()
673 673 c = ip.Completer
674 674 kwd = c._default_arguments_from_docstring("min(iterable[, key=func]) -> value")
675 675 nt.assert_equal(kwd, ["key"])
676 676 # with cython type etc
677 677 kwd = c._default_arguments_from_docstring(
678 678 "Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n"
679 679 )
680 680 nt.assert_equal(kwd, ["ncall", "resume", "nsplit"])
681 681 # white spaces
682 682 kwd = c._default_arguments_from_docstring(
683 683 "\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n"
684 684 )
685 685 nt.assert_equal(kwd, ["ncall", "resume", "nsplit"])
686 686
687 687 def test_line_magics(self):
688 688 ip = get_ipython()
689 689 c = ip.Completer
690 690 s, matches = c.complete(None, "lsmag")
691 691 nt.assert_in("%lsmagic", matches)
692 692 s, matches = c.complete(None, "%lsmag")
693 693 nt.assert_in("%lsmagic", matches)
694 694
695 695 def test_cell_magics(self):
696 696 from IPython.core.magic import register_cell_magic
697 697
698 698 @register_cell_magic
699 699 def _foo_cellm(line, cell):
700 700 pass
701 701
702 702 ip = get_ipython()
703 703 c = ip.Completer
704 704
705 705 s, matches = c.complete(None, "_foo_ce")
706 706 nt.assert_in("%%_foo_cellm", matches)
707 707 s, matches = c.complete(None, "%%_foo_ce")
708 708 nt.assert_in("%%_foo_cellm", matches)
709 709
710 710 def test_line_cell_magics(self):
711 711 from IPython.core.magic import register_line_cell_magic
712 712
713 713 @register_line_cell_magic
714 714 def _bar_cellm(line, cell):
715 715 pass
716 716
717 717 ip = get_ipython()
718 718 c = ip.Completer
719 719
720 720 # The policy here is trickier, see comments in completion code. The
721 721 # returned values depend on whether the user passes %% or not explicitly,
722 722 # and this will show a difference if the same name is both a line and cell
723 723 # magic.
724 724 s, matches = c.complete(None, "_bar_ce")
725 725 nt.assert_in("%_bar_cellm", matches)
726 726 nt.assert_in("%%_bar_cellm", matches)
727 727 s, matches = c.complete(None, "%_bar_ce")
728 728 nt.assert_in("%_bar_cellm", matches)
729 729 nt.assert_in("%%_bar_cellm", matches)
730 730 s, matches = c.complete(None, "%%_bar_ce")
731 731 nt.assert_not_in("%_bar_cellm", matches)
732 732 nt.assert_in("%%_bar_cellm", matches)
733 733
734 734 def test_magic_completion_order(self):
735 735 ip = get_ipython()
736 736 c = ip.Completer
737 737
738 738 # Test ordering of line and cell magics.
739 739 text, matches = c.complete("timeit")
740 740 nt.assert_equal(matches, ["%timeit", "%%timeit"])
741 741
742 742 def test_magic_completion_shadowing(self):
743 743 ip = get_ipython()
744 744 c = ip.Completer
745 745 c.use_jedi = False
746 746
747 747 # Before importing matplotlib, %matplotlib magic should be the only option.
748 748 text, matches = c.complete("mat")
749 749 nt.assert_equal(matches, ["%matplotlib"])
750 750
751 751 # The newly introduced name should shadow the magic.
752 752 ip.run_cell("matplotlib = 1")
753 753 text, matches = c.complete("mat")
754 754 nt.assert_equal(matches, ["matplotlib"])
755 755
756 756 # After removing matplotlib from namespace, the magic should again be
757 757 # the only option.
758 758 del ip.user_ns["matplotlib"]
759 759 text, matches = c.complete("mat")
760 760 nt.assert_equal(matches, ["%matplotlib"])
761 761
762 762 def test_magic_completion_shadowing_explicit(self):
763 763 """
764 764         If the user tries to complete a shadowed magic, an explicit % prefix should
765 765         still return the completions.
766 766 """
767 767 ip = get_ipython()
768 768 c = ip.Completer
769 769
770 770 # Before importing matplotlib, %matplotlib magic should be the only option.
771 771 text, matches = c.complete("%mat")
772 772 nt.assert_equal(matches, ["%matplotlib"])
773 773
774 774 ip.run_cell("matplotlib = 1")
775 775
776 776         # Even with matplotlib now defined in the namespace, an explicit % should
777 777         # still return the magic as the only option.
778 778 text, matches = c.complete("%mat")
779 779 nt.assert_equal(matches, ["%matplotlib"])
780 780
781 781 def test_magic_config(self):
782 782 ip = get_ipython()
783 783 c = ip.Completer
784 784
785 785 s, matches = c.complete(None, "conf")
786 786 nt.assert_in("%config", matches)
787 787 s, matches = c.complete(None, "conf")
788 788 nt.assert_not_in("AliasManager", matches)
789 789 s, matches = c.complete(None, "config ")
790 790 nt.assert_in("AliasManager", matches)
791 791 s, matches = c.complete(None, "%config ")
792 792 nt.assert_in("AliasManager", matches)
793 793 s, matches = c.complete(None, "config Ali")
794 794 nt.assert_list_equal(["AliasManager"], matches)
795 795 s, matches = c.complete(None, "%config Ali")
796 796 nt.assert_list_equal(["AliasManager"], matches)
797 797 s, matches = c.complete(None, "config AliasManager")
798 798 nt.assert_list_equal(["AliasManager"], matches)
799 799 s, matches = c.complete(None, "%config AliasManager")
800 800 nt.assert_list_equal(["AliasManager"], matches)
801 801 s, matches = c.complete(None, "config AliasManager.")
802 802 nt.assert_in("AliasManager.default_aliases", matches)
803 803 s, matches = c.complete(None, "%config AliasManager.")
804 804 nt.assert_in("AliasManager.default_aliases", matches)
805 805 s, matches = c.complete(None, "config AliasManager.de")
806 806 nt.assert_list_equal(["AliasManager.default_aliases"], matches)
807 807 s, matches = c.complete(None, "config AliasManager.de")
808 808 nt.assert_list_equal(["AliasManager.default_aliases"], matches)
809 809
810 810 def test_magic_color(self):
811 811 ip = get_ipython()
812 812 c = ip.Completer
813 813
814 814 s, matches = c.complete(None, "colo")
815 815 nt.assert_in("%colors", matches)
816 816 s, matches = c.complete(None, "colo")
817 817 nt.assert_not_in("NoColor", matches)
818 818 s, matches = c.complete(None, "%colors") # No trailing space
819 819 nt.assert_not_in("NoColor", matches)
820 820 s, matches = c.complete(None, "colors ")
821 821 nt.assert_in("NoColor", matches)
822 822 s, matches = c.complete(None, "%colors ")
823 823 nt.assert_in("NoColor", matches)
824 824 s, matches = c.complete(None, "colors NoCo")
825 825 nt.assert_list_equal(["NoColor"], matches)
826 826 s, matches = c.complete(None, "%colors NoCo")
827 827 nt.assert_list_equal(["NoColor"], matches)
828 828
829 829 def test_match_dict_keys(self):
830 830 """
831 831         Test that match_dict_keys works on a couple of use cases, returns what is
832 832         expected, and does not crash
833 833 """
834 834 delims = " \t\n`!@#$^&*()=+[{]}\\|;:'\",<>?"
835 835
836 836 keys = ["foo", b"far"]
837 837 assert match_dict_keys(keys, "b'", delims=delims) == ("'", 2, ["far"])
838 838 assert match_dict_keys(keys, "b'f", delims=delims) == ("'", 2, ["far"])
839 839 assert match_dict_keys(keys, 'b"', delims=delims) == ('"', 2, ["far"])
840 840 assert match_dict_keys(keys, 'b"f', delims=delims) == ('"', 2, ["far"])
841 841
842 842 assert match_dict_keys(keys, "'", delims=delims) == ("'", 1, ["foo"])
843 843 assert match_dict_keys(keys, "'f", delims=delims) == ("'", 1, ["foo"])
844 844 assert match_dict_keys(keys, '"', delims=delims) == ('"', 1, ["foo"])
845 845 assert match_dict_keys(keys, '"f', delims=delims) == ('"', 1, ["foo"])
846 846
847 847 match_dict_keys
848 848
849 849 def test_match_dict_keys_tuple(self):
850 850 """
851 851         Test that match_dict_keys called with an extra prefix works on a couple of use cases,
852 852         returns what is expected, and does not crash.
853 853 """
854 854 delims = " \t\n`!@#$^&*()=+[{]}\\|;:'\",<>?"
855 855
856 856 keys = [("foo", "bar"), ("foo", "oof"), ("foo", b"bar"), ('other', 'test')]
857 857
858 858 # Completion on first key == "foo"
859 859 assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("foo",)) == ("'", 1, ["bar", "oof"])
860 860 assert match_dict_keys(keys, "\"", delims=delims, extra_prefix=("foo",)) == ("\"", 1, ["bar", "oof"])
861 861 assert match_dict_keys(keys, "'o", delims=delims, extra_prefix=("foo",)) == ("'", 1, ["oof"])
862 862 assert match_dict_keys(keys, "\"o", delims=delims, extra_prefix=("foo",)) == ("\"", 1, ["oof"])
863 863 assert match_dict_keys(keys, "b'", delims=delims, extra_prefix=("foo",)) == ("'", 2, ["bar"])
864 864 assert match_dict_keys(keys, "b\"", delims=delims, extra_prefix=("foo",)) == ("\"", 2, ["bar"])
865 865 assert match_dict_keys(keys, "b'b", delims=delims, extra_prefix=("foo",)) == ("'", 2, ["bar"])
866 866 assert match_dict_keys(keys, "b\"b", delims=delims, extra_prefix=("foo",)) == ("\"", 2, ["bar"])
867 867
868 868 # No Completion
869 869 assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("no_foo",)) == ("'", 1, [])
870 870 assert match_dict_keys(keys, "'", delims=delims, extra_prefix=("fo",)) == ("'", 1, [])
871 871
872 872 keys = [('foo1', 'foo2', 'foo3', 'foo4'), ('foo1', 'foo2', 'bar', 'foo4')]
873 873 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1',)) == ("'", 1, ["foo2", "foo2"])
874 874 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2')) == ("'", 1, ["foo3"])
875 875 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2', 'foo3')) == ("'", 1, ["foo4"])
876 876 assert match_dict_keys(keys, "'foo", delims=delims, extra_prefix=('foo1', 'foo2', 'foo3', 'foo4')) == ("'", 1, [])
877 877
878 878 def test_dict_key_completion_string(self):
879 879 """Test dictionary key completion for string keys"""
880 880 ip = get_ipython()
881 881 complete = ip.Completer.complete
882 882
883 883 ip.user_ns["d"] = {"abc": None}
884 884
885 885 # check completion at different stages
886 886 _, matches = complete(line_buffer="d[")
887 887 nt.assert_in("'abc'", matches)
888 888 nt.assert_not_in("'abc']", matches)
889 889
890 890 _, matches = complete(line_buffer="d['")
891 891 nt.assert_in("abc", matches)
892 892 nt.assert_not_in("abc']", matches)
893 893
894 894 _, matches = complete(line_buffer="d['a")
895 895 nt.assert_in("abc", matches)
896 896 nt.assert_not_in("abc']", matches)
897 897
898 898 # check use of different quoting
899 899 _, matches = complete(line_buffer='d["')
900 900 nt.assert_in("abc", matches)
901 901 nt.assert_not_in('abc"]', matches)
902 902
903 903 _, matches = complete(line_buffer='d["a')
904 904 nt.assert_in("abc", matches)
905 905 nt.assert_not_in('abc"]', matches)
906 906
907 907 # check sensitivity to following context
908 908 _, matches = complete(line_buffer="d[]", cursor_pos=2)
909 909 nt.assert_in("'abc'", matches)
910 910
911 911 _, matches = complete(line_buffer="d['']", cursor_pos=3)
912 912 nt.assert_in("abc", matches)
913 913 nt.assert_not_in("abc'", matches)
914 914 nt.assert_not_in("abc']", matches)
915 915
916 916 # check multiple solutions are correctly returned and that noise is not
917 917 ip.user_ns["d"] = {
918 918 "abc": None,
919 919 "abd": None,
920 920 "bad": None,
921 921 object(): None,
922 922 5: None,
923 923 ("abe", None): None,
924 924 (None, "abf"): None
925 925 }
926 926
927 927 _, matches = complete(line_buffer="d['a")
928 928 nt.assert_in("abc", matches)
929 929 nt.assert_in("abd", matches)
930 930 nt.assert_not_in("bad", matches)
931 931 nt.assert_not_in("abe", matches)
932 932 nt.assert_not_in("abf", matches)
933 933 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
934 934
935 935 # check escaping and whitespace
936 936 ip.user_ns["d"] = {"a\nb": None, "a'b": None, 'a"b': None, "a word": None}
937 937 _, matches = complete(line_buffer="d['a")
938 938 nt.assert_in("a\\nb", matches)
939 939 nt.assert_in("a\\'b", matches)
940 940 nt.assert_in('a"b', matches)
941 941 nt.assert_in("a word", matches)
942 942 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
943 943
944 944 # - can complete on non-initial word of the string
945 945 _, matches = complete(line_buffer="d['a w")
946 946 nt.assert_in("word", matches)
947 947
948 948 # - understands quote escaping
949 949 _, matches = complete(line_buffer="d['a\\'")
950 950 nt.assert_in("b", matches)
951 951
952 952 # - default quoting should work like repr
953 953 _, matches = complete(line_buffer="d[")
954 954 nt.assert_in('"a\'b"', matches)
955 955
956 956 # - when opening quote with ", possible to match with unescaped apostrophe
957 957 _, matches = complete(line_buffer="d[\"a'")
958 958 nt.assert_in("b", matches)
959 959
960 960 # need to not split at delims that readline won't split at
961 961 if "-" not in ip.Completer.splitter.delims:
962 962 ip.user_ns["d"] = {"before-after": None}
963 963 _, matches = complete(line_buffer="d['before-af")
964 964 nt.assert_in("before-after", matches)
965 965
966 966 # check completion on tuple-of-string keys at different stage - on first key
967 967 ip.user_ns["d"] = {('foo', 'bar'): None}
968 968 _, matches = complete(line_buffer="d[")
969 969 nt.assert_in("'foo'", matches)
970 970 nt.assert_not_in("'foo']", matches)
971 971 nt.assert_not_in("'bar'", matches)
972 972 nt.assert_not_in("foo", matches)
973 973 nt.assert_not_in("bar", matches)
974 974
975 975 # - match the prefix
976 976 _, matches = complete(line_buffer="d['f")
977 977 nt.assert_in("foo", matches)
978 978 nt.assert_not_in("foo']", matches)
979 979 nt.assert_not_in("foo\"]", matches)
980 980 _, matches = complete(line_buffer="d['foo")
981 981 nt.assert_in("foo", matches)
982 982
983 983 # - can complete on second key
984 984 _, matches = complete(line_buffer="d['foo', ")
985 985 nt.assert_in("'bar'", matches)
986 986 _, matches = complete(line_buffer="d['foo', 'b")
987 987 nt.assert_in("bar", matches)
988 988 nt.assert_not_in("foo", matches)
989 989
990 990 # - does not propose missing keys
991 991 _, matches = complete(line_buffer="d['foo', 'f")
992 992 nt.assert_not_in("bar", matches)
993 993 nt.assert_not_in("foo", matches)
994 994
995 995 # check sensitivity to following context
996 996 _, matches = complete(line_buffer="d['foo',]", cursor_pos=8)
997 997 nt.assert_in("'bar'", matches)
998 998 nt.assert_not_in("bar", matches)
999 999 nt.assert_not_in("'foo'", matches)
1000 1000 nt.assert_not_in("foo", matches)
1001 1001
1002 1002 _, matches = complete(line_buffer="d['']", cursor_pos=3)
1003 1003 nt.assert_in("foo", matches)
1004 1004 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
1005 1005
1006 1006 _, matches = complete(line_buffer='d[""]', cursor_pos=3)
1007 1007 nt.assert_in("foo", matches)
1008 1008 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
1009 1009
1010 1010 _, matches = complete(line_buffer='d["foo","]', cursor_pos=9)
1011 1011 nt.assert_in("bar", matches)
1012 1012 assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
1013 1013
1014 1014 _, matches = complete(line_buffer='d["foo",]', cursor_pos=8)
1015 1015 nt.assert_in("'bar'", matches)
1016 1016 nt.assert_not_in("bar", matches)
1017 1017
1018 1018 # Can complete with longer tuple keys
1019 1019 ip.user_ns["d"] = {('foo', 'bar', 'foobar'): None}
1020 1020
1021 1021 # - can complete second key
1022 1022 _, matches = complete(line_buffer="d['foo', 'b")
1023 1023 nt.assert_in('bar', matches)
1024 1024 nt.assert_not_in('foo', matches)
1025 1025 nt.assert_not_in('foobar', matches)
1026 1026
1027 1027 # - can complete third key
1028 1028 _, matches = complete(line_buffer="d['foo', 'bar', 'fo")
1029 1029 nt.assert_in('foobar', matches)
1030 1030 nt.assert_not_in('foo', matches)
1031 1031 nt.assert_not_in('bar', matches)
1032 1032
1033 1033
1034 1034 def test_dict_key_completion_contexts(self):
1035 1035 """Test expression contexts in which dict key completion occurs"""
1036 1036 ip = get_ipython()
1037 1037 complete = ip.Completer.complete
1038 1038 d = {"abc": None}
1039 1039 ip.user_ns["d"] = d
1040 1040
1041 1041 class C:
1042 1042 data = d
1043 1043
1044 1044 ip.user_ns["C"] = C
1045 1045 ip.user_ns["get"] = lambda: d
1046 1046
1047 1047 def assert_no_completion(**kwargs):
1048 1048 _, matches = complete(**kwargs)
1049 1049 nt.assert_not_in("abc", matches)
1050 1050 nt.assert_not_in("abc'", matches)
1051 1051 nt.assert_not_in("abc']", matches)
1052 1052 nt.assert_not_in("'abc'", matches)
1053 1053 nt.assert_not_in("'abc']", matches)
1054 1054
1055 1055 def assert_completion(**kwargs):
1056 1056 _, matches = complete(**kwargs)
1057 1057 nt.assert_in("'abc'", matches)
1058 1058 nt.assert_not_in("'abc']", matches)
1059 1059
1060 1060 # no completion after string closed, even if reopened
1061 1061 assert_no_completion(line_buffer="d['a'")
1062 1062 assert_no_completion(line_buffer='d["a"')
1063 1063 assert_no_completion(line_buffer="d['a' + ")
1064 1064 assert_no_completion(line_buffer="d['a' + '")
1065 1065
1066 1066 # completion in non-trivial expressions
1067 1067 assert_completion(line_buffer="+ d[")
1068 1068 assert_completion(line_buffer="(d[")
1069 1069 assert_completion(line_buffer="C.data[")
1070 1070
1071 1071 # greedy flag
1072 1072 def assert_completion(**kwargs):
1073 1073 _, matches = complete(**kwargs)
1074 1074 nt.assert_in("get()['abc']", matches)
1075 1075
1076 1076 assert_no_completion(line_buffer="get()[")
1077 1077 with greedy_completion():
1078 1078 assert_completion(line_buffer="get()[")
1079 1079 assert_completion(line_buffer="get()['")
1080 1080 assert_completion(line_buffer="get()['a")
1081 1081 assert_completion(line_buffer="get()['ab")
1082 1082 assert_completion(line_buffer="get()['abc")
1083 1083
1084 1084 def test_dict_key_completion_bytes(self):
1085 1085 """Test handling of bytes in dict key completion"""
1086 1086 ip = get_ipython()
1087 1087 complete = ip.Completer.complete
1088 1088
1089 1089 ip.user_ns["d"] = {"abc": None, b"abd": None}
1090 1090
1091 1091 _, matches = complete(line_buffer="d[")
1092 1092 nt.assert_in("'abc'", matches)
1093 1093 nt.assert_in("b'abd'", matches)
1094 1094
1095 1095 if False: # not currently implemented
1096 1096 _, matches = complete(line_buffer="d[b")
1097 1097 nt.assert_in("b'abd'", matches)
1098 1098 nt.assert_not_in("b'abc'", matches)
1099 1099
1100 1100 _, matches = complete(line_buffer="d[b'")
1101 1101 nt.assert_in("abd", matches)
1102 1102 nt.assert_not_in("abc", matches)
1103 1103
1104 1104 _, matches = complete(line_buffer="d[B'")
1105 1105 nt.assert_in("abd", matches)
1106 1106 nt.assert_not_in("abc", matches)
1107 1107
1108 1108 _, matches = complete(line_buffer="d['")
1109 1109 nt.assert_in("abc", matches)
1110 1110 nt.assert_not_in("abd", matches)
1111 1111
1112 1112 def test_dict_key_completion_unicode_py3(self):
1113 1113 """Test handling of unicode in dict key completion"""
1114 1114 ip = get_ipython()
1115 1115 complete = ip.Completer.complete
1116 1116
1117 1117 ip.user_ns["d"] = {"a\u05d0": None}
1118 1118
1119 1119 # query using escape
1120 1120 if sys.platform != "win32":
1121 1121 # Known failure on Windows
1122 1122 _, matches = complete(line_buffer="d['a\\u05d0")
1123 1123 nt.assert_in("u05d0", matches) # tokenized after \\
1124 1124
1125 1125 # query using character
1126 1126 _, matches = complete(line_buffer="d['a\u05d0")
1127 1127 nt.assert_in("a\u05d0", matches)
1128 1128
1129 1129 with greedy_completion():
1130 1130 # query using escape
1131 1131 _, matches = complete(line_buffer="d['a\\u05d0")
1132 1132 nt.assert_in("d['a\\u05d0']", matches) # tokenized after \\
1133 1133
1134 1134 # query using character
1135 1135 _, matches = complete(line_buffer="d['a\u05d0")
1136 1136 nt.assert_in("d['a\u05d0']", matches)
1137 1137
1138 1138 @dec.skip_without("numpy")
1139 1139 def test_struct_array_key_completion(self):
1140 1140 """Test dict key completion applies to numpy struct arrays"""
1141 1141 import numpy
1142 1142
1143 1143 ip = get_ipython()
1144 1144 complete = ip.Completer.complete
1145 1145 ip.user_ns["d"] = numpy.array([], dtype=[("hello", "f"), ("world", "f")])
1146 1146 _, matches = complete(line_buffer="d['")
1147 1147 nt.assert_in("hello", matches)
1148 1148 nt.assert_in("world", matches)
1149 1149 # complete on the numpy struct itself
1150 1150 dt = numpy.dtype(
1151 1151 [("my_head", [("my_dt", ">u4"), ("my_df", ">u4")]), ("my_data", ">f4", 5)]
1152 1152 )
1153 1153 x = numpy.zeros(2, dtype=dt)
1154 1154 ip.user_ns["d"] = x[1]
1155 1155 _, matches = complete(line_buffer="d['")
1156 1156 nt.assert_in("my_head", matches)
1157 1157 nt.assert_in("my_data", matches)
1158 1158 # complete on a nested level
1159 1159 with greedy_completion():
1160 1160 ip.user_ns["d"] = numpy.zeros(2, dtype=dt)
1161 1161 _, matches = complete(line_buffer="d[1]['my_head']['")
1162 1162 nt.assert_true(any(["my_dt" in m for m in matches]))
1163 1163 nt.assert_true(any(["my_df" in m for m in matches]))
1164 1164
1165 1165 @dec.skip_without("pandas")
1166 1166 def test_dataframe_key_completion(self):
1167 1167 """Test dict key completion applies to pandas DataFrames"""
1168 1168 import pandas
1169 1169
1170 1170 ip = get_ipython()
1171 1171 complete = ip.Completer.complete
1172 1172 ip.user_ns["d"] = pandas.DataFrame({"hello": [1], "world": [2]})
1173 1173 _, matches = complete(line_buffer="d['")
1174 1174 nt.assert_in("hello", matches)
1175 1175 nt.assert_in("world", matches)
1176 1176
1177 1177 def test_dict_key_completion_invalids(self):
1178 1178 """Smoke test cases dict key completion can't handle"""
1179 1179 ip = get_ipython()
1180 1180 complete = ip.Completer.complete
1181 1181
1182 1182 ip.user_ns["no_getitem"] = None
1183 1183 ip.user_ns["no_keys"] = []
1184 1184 ip.user_ns["cant_call_keys"] = dict
1185 1185 ip.user_ns["empty"] = {}
1186 1186 ip.user_ns["d"] = {"abc": 5}
1187 1187
1188 1188 _, matches = complete(line_buffer="no_getitem['")
1189 1189 _, matches = complete(line_buffer="no_keys['")
1190 1190 _, matches = complete(line_buffer="cant_call_keys['")
1191 1191 _, matches = complete(line_buffer="empty['")
1192 1192 _, matches = complete(line_buffer="name_error['")
1193 1193 _, matches = complete(line_buffer="d['\\") # incomplete escape
1194 1194
1195 1195 def test_object_key_completion(self):
1196 1196 ip = get_ipython()
1197 1197 ip.user_ns["key_completable"] = KeyCompletable(["qwerty", "qwick"])
1198 1198
1199 1199 _, matches = ip.Completer.complete(line_buffer="key_completable['qw")
1200 1200 nt.assert_in("qwerty", matches)
1201 1201 nt.assert_in("qwick", matches)
1202 1202
1203 1203 def test_class_key_completion(self):
1204 1204 ip = get_ipython()
1205 1205 NamedInstanceClass("qwerty")
1206 1206 NamedInstanceClass("qwick")
1207 1207 ip.user_ns["named_instance_class"] = NamedInstanceClass
1208 1208
1209 1209 _, matches = ip.Completer.complete(line_buffer="named_instance_class['qw")
1210 1210 nt.assert_in("qwerty", matches)
1211 1211 nt.assert_in("qwick", matches)
1212 1212
1213 1213 def test_tryimport(self):
1214 1214 """
1215 1215         Test that try_import doesn't crash on a trailing dot, and imports the module before it
1216 1216 """
1217 1217 from IPython.core.completerlib import try_import
1218 1218
1219 1219 assert try_import("IPython.")
1220 1220
1221 1221 def test_aimport_module_completer(self):
1222 1222 ip = get_ipython()
1223 1223 _, matches = ip.complete("i", "%aimport i")
1224 1224 nt.assert_in("io", matches)
1225 1225 nt.assert_not_in("int", matches)
1226 1226
1227 1227 def test_nested_import_module_completer(self):
1228 1228 ip = get_ipython()
1229 1229 _, matches = ip.complete(None, "import IPython.co", 17)
1230 1230 nt.assert_in("IPython.core", matches)
1231 1231 nt.assert_not_in("import IPython.core", matches)
1232 1232 nt.assert_not_in("IPython.display", matches)
1233 1233
1234 1234 def test_import_module_completer(self):
1235 1235 ip = get_ipython()
1236 1236 _, matches = ip.complete("i", "import i")
1237 1237 nt.assert_in("io", matches)
1238 1238 nt.assert_not_in("int", matches)
1239 1239
1240 1240 def test_from_module_completer(self):
1241 1241 ip = get_ipython()
1242 1242 _, matches = ip.complete("B", "from io import B", 16)
1243 1243 nt.assert_in("BytesIO", matches)
1244 1244 nt.assert_not_in("BaseException", matches)
1245 1245
1246 1246 def test_snake_case_completion(self):
1247 1247 ip = get_ipython()
1248 1248 ip.Completer.use_jedi = False
1249 1249 ip.user_ns["some_three"] = 3
1250 1250 ip.user_ns["some_four"] = 4
1251 1251 _, matches = ip.complete("s_", "print(s_f")
1252 1252 nt.assert_in("some_three", matches)
1253 1253 nt.assert_in("some_four", matches)
1254 1254
1255 1255 def test_mix_terms(self):
1256 1256 ip = get_ipython()
1257 1257 from textwrap import dedent
1258 1258
1259 1259 ip.Completer.use_jedi = False
1260 1260 ip.ex(
1261 1261 dedent(
1262 1262 """
1263 1263 class Test:
1264 1264 def meth(self, meth_arg1):
1265 1265 print("meth")
1266 1266
1267 1267 def meth_1(self, meth1_arg1, meth1_arg2):
1268 1268 print("meth1")
1269 1269
1270 1270 def meth_2(self, meth2_arg1, meth2_arg2):
1271 1271 print("meth2")
1272 1272 test = Test()
1273 1273 """
1274 1274 )
1275 1275 )
1276 1276 _, matches = ip.complete(None, "test.meth(")
1277 1277 nt.assert_in("meth_arg1=", matches)
1278 1278 nt.assert_not_in("meth2_arg1=", matches)
@@ -1,490 +1,490 b''
1 1 # Copyright (c) IPython Development Team.
2 2 # Distributed under the terms of the Modified BSD License.
3 3
4 4 import json
5 5 import os
6 6 import warnings
7 7
8 8 from unittest import mock
9 9
10 10 import nose.tools as nt
11 11
12 12 from IPython import display
13 13 from IPython.core.getipython import get_ipython
14 14 from IPython.utils.io import capture_output
15 15 from IPython.utils.tempdir import NamedFileInTemporaryDirectory
16 16 from IPython import paths as ipath
17 17 from IPython.testing.tools import AssertNotPrints
18 18
19 19 import IPython.testing.decorators as dec
20 20
21 21 def test_image_size():
22 22 """Simple test for display.Image(args, width=x,height=y)"""
23 23 thisurl = 'http://www.google.fr/images/srpr/logo3w.png'
24 24 img = display.Image(url=thisurl, width=200, height=200)
25 25 nt.assert_equal(u'<img src="%s" width="200" height="200"/>' % (thisurl), img._repr_html_())
26 26 img = display.Image(url=thisurl, metadata={'width':200, 'height':200})
27 27 nt.assert_equal(u'<img src="%s" width="200" height="200"/>' % (thisurl), img._repr_html_())
28 28 img = display.Image(url=thisurl, width=200)
29 29 nt.assert_equal(u'<img src="%s" width="200"/>' % (thisurl), img._repr_html_())
30 30 img = display.Image(url=thisurl)
31 31 nt.assert_equal(u'<img src="%s"/>' % (thisurl), img._repr_html_())
32 32 img = display.Image(url=thisurl, unconfined=True)
33 33 nt.assert_equal(u'<img src="%s" class="unconfined"/>' % (thisurl), img._repr_html_())
34 34
35 35
36 36 def test_image_mimes():
37 37 fmt = get_ipython().display_formatter.format
38 38 for format in display.Image._ACCEPTABLE_EMBEDDINGS:
39 39 mime = display.Image._MIMETYPES[format]
40 40 img = display.Image(b'garbage', format=format)
41 41 data, metadata = fmt(img)
42 42 nt.assert_equal(sorted(data), sorted([mime, 'text/plain']))
43 43
44 44
45 45 def test_geojson():
46 46
47 47 gj = display.GeoJSON(data={
48 48 "type": "Feature",
49 49 "geometry": {
50 50 "type": "Point",
51 51 "coordinates": [-81.327, 296.038]
52 52 },
53 53 "properties": {
54 54 "name": "Inca City"
55 55 }
56 56 },
57 57 url_template="http://s3-eu-west-1.amazonaws.com/whereonmars.cartodb.net/{basemap_id}/{z}/{x}/{y}.png",
58 58 layer_options={
59 59 "basemap_id": "celestia_mars-shaded-16k_global",
60 60 "attribution": "Celestia/praesepe",
61 61 "minZoom": 0,
62 62 "maxZoom": 18,
63 63 })
64 64 nt.assert_equal(u'<IPython.core.display.GeoJSON object>', str(gj))
65 65
66 66 def test_retina_png():
67 67 here = os.path.dirname(__file__)
68 68 img = display.Image(os.path.join(here, "2x2.png"), retina=True)
69 69 nt.assert_equal(img.height, 1)
70 70 nt.assert_equal(img.width, 1)
71 71 data, md = img._repr_png_()
72 72 nt.assert_equal(md['width'], 1)
73 73 nt.assert_equal(md['height'], 1)
74 74
75 75 def test_embed_svg_url():
76 76 import gzip
77 77 from io import BytesIO
78 78 svg_data = b'<svg><circle x="0" y="0" r="1"/></svg>'
79 79 url = 'http://test.com/circle.svg'
80 80
81 81 gzip_svg = BytesIO()
82 82 with gzip.open(gzip_svg, 'wb') as fp:
83 83 fp.write(svg_data)
84 84 gzip_svg = gzip_svg.getvalue()
85 85
86 86 def mocked_urlopen(*args, **kwargs):
87 87 class MockResponse:
88 88 def __init__(self, svg):
89 89 self._svg_data = svg
90 90 self.headers = {'content-type': 'image/svg+xml'}
91 91
92 92 def read(self):
93 93 return self._svg_data
94 94
95 95 if args[0] == url:
96 96 return MockResponse(svg_data)
97 97 elif args[0] == url + "z":
98 98 ret = MockResponse(gzip_svg)
99 99 ret.headers["content-encoding"] = "gzip"
100 100 return ret
101 101 return MockResponse(None)
102 102
103 103 with mock.patch('urllib.request.urlopen', side_effect=mocked_urlopen):
104 104 svg = display.SVG(url=url)
105 105 nt.assert_true(svg._repr_svg_().startswith('<svg'))
106 106 svg = display.SVG(url=url + 'z')
107 107 nt.assert_true(svg._repr_svg_().startswith('<svg'))
108 108
109 109 def test_retina_jpeg():
110 110 here = os.path.dirname(__file__)
111 111 img = display.Image(os.path.join(here, "2x2.jpg"), retina=True)
112 112 nt.assert_equal(img.height, 1)
113 113 nt.assert_equal(img.width, 1)
114 114 data, md = img._repr_jpeg_()
115 115 nt.assert_equal(md['width'], 1)
116 116 nt.assert_equal(md['height'], 1)
117 117
118 118 def test_base64image():
119 119 display.Image("iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQMAAAAl21bKAAAAA1BMVEUAAACnej3aAAAAAWJLR0QAiAUdSAAAAAlwSFlzAAALEwAACxMBAJqcGAAAAAd0SU1FB94BCRQnOqNu0b4AAAAKSURBVAjXY2AAAAACAAHiIbwzAAAAAElFTkSuQmCC")
120 120
121 121 def test_image_filename_defaults():
122 122 '''test format constraint, and validity of jpeg and png'''
123 123 tpath = ipath.get_ipython_package_dir()
124 124 nt.assert_raises(ValueError, display.Image, filename=os.path.join(tpath, 'testing/tests/badformat.zip'),
125 125 embed=True)
126 126 nt.assert_raises(ValueError, display.Image)
127 127 nt.assert_raises(ValueError, display.Image, data='this is not an image', format='badformat', embed=True)
128 # check boths paths to allow packages to test at build and install time
128 # check both paths to allow packages to test at build and install time
129 129 imgfile = os.path.join(tpath, 'core/tests/2x2.png')
130 130 img = display.Image(filename=imgfile)
131 131 nt.assert_equal('png', img.format)
132 132 nt.assert_is_not_none(img._repr_png_())
133 133 img = display.Image(filename=os.path.join(tpath, 'testing/tests/logo.jpg'), embed=False)
134 134 nt.assert_equal('jpeg', img.format)
135 135 nt.assert_is_none(img._repr_jpeg_())
136 136
137 137 def _get_inline_config():
138 138 from matplotlib_inline.config import InlineBackend
139 139 return InlineBackend.instance()
140 140
141 141
142 142 @dec.skip_without("ipykernel")
143 143 @dec.skip_without("matplotlib")
144 144 def test_set_matplotlib_close():
145 145 cfg = _get_inline_config()
146 146 cfg.close_figures = False
147 147 display.set_matplotlib_close()
148 148 assert cfg.close_figures
149 149 display.set_matplotlib_close(False)
150 150 assert not cfg.close_figures
151 151
152 152 _fmt_mime_map = {
153 153 'png': 'image/png',
154 154 'jpeg': 'image/jpeg',
155 155 'pdf': 'application/pdf',
156 156 'retina': 'image/png',
157 157 'svg': 'image/svg+xml',
158 158 }
159 159
160 160 @dec.skip_without('matplotlib')
161 161 def test_set_matplotlib_formats():
162 162 from matplotlib.figure import Figure
163 163 formatters = get_ipython().display_formatter.formatters
164 164 for formats in [
165 165 ('png',),
166 166 ('pdf', 'svg'),
167 167 ('jpeg', 'retina', 'png'),
168 168 (),
169 169 ]:
170 170 active_mimes = {_fmt_mime_map[fmt] for fmt in formats}
171 171 display.set_matplotlib_formats(*formats)
172 172 for mime, f in formatters.items():
173 173 if mime in active_mimes:
174 174 nt.assert_in(Figure, f)
175 175 else:
176 176 nt.assert_not_in(Figure, f)
177 177
178 178
179 179 @dec.skip_without("ipykernel")
180 180 @dec.skip_without("matplotlib")
181 181 def test_set_matplotlib_formats_kwargs():
182 182 from matplotlib.figure import Figure
183 183 ip = get_ipython()
184 184 cfg = _get_inline_config()
185 185 cfg.print_figure_kwargs.update(dict(foo='bar'))
186 186 kwargs = dict(dpi=150)
187 187 display.set_matplotlib_formats('png', **kwargs)
188 188 formatter = ip.display_formatter.formatters['image/png']
189 189 f = formatter.lookup_by_type(Figure)
190 190 formatter_kwargs = f.keywords
191 191 expected = kwargs
192 192 expected["base64"] = True
193 193 expected["fmt"] = "png"
194 194 expected.update(cfg.print_figure_kwargs)
195 195 nt.assert_equal(formatter_kwargs, expected)
196 196
197 197 def test_display_available():
198 198 """
199 199 Test that display is available without import
200 200
201 201 We don't really care if it's in builtin or anything else, but it should
202 202 always be available.
203 203 """
204 204 ip = get_ipython()
205 205 with AssertNotPrints('NameError'):
206 206 ip.run_cell('display')
207 207 try:
208 208 ip.run_cell('del display')
209 209 except NameError:
210 210 pass # it's ok, it might be in builtins
211 211 # even if deleted it should be back
212 212 with AssertNotPrints('NameError'):
213 213 ip.run_cell('display')
214 214
215 215 def test_textdisplayobj_pretty_repr():
216 216 p = display.Pretty("This is a simple test")
217 217 nt.assert_equal(repr(p), '<IPython.core.display.Pretty object>')
218 218 nt.assert_equal(p.data, 'This is a simple test')
219 219
220 220 p._show_mem_addr = True
221 221 nt.assert_equal(repr(p), object.__repr__(p))
222 222
223 223 def test_displayobject_repr():
224 224 h = display.HTML('<br />')
225 225 nt.assert_equal(repr(h), '<IPython.core.display.HTML object>')
226 226 h._show_mem_addr = True
227 227 nt.assert_equal(repr(h), object.__repr__(h))
228 228 h._show_mem_addr = False
229 229 nt.assert_equal(repr(h), '<IPython.core.display.HTML object>')
230 230
231 231 j = display.Javascript('')
232 232 nt.assert_equal(repr(j), '<IPython.core.display.Javascript object>')
233 233 j._show_mem_addr = True
234 234 nt.assert_equal(repr(j), object.__repr__(j))
235 235 j._show_mem_addr = False
236 236 nt.assert_equal(repr(j), '<IPython.core.display.Javascript object>')
237 237
238 238 @mock.patch('warnings.warn')
239 239 def test_encourage_iframe_over_html(m_warn):
240 240 display.HTML()
241 241 m_warn.assert_not_called()
242 242
243 243 display.HTML('<br />')
244 244 m_warn.assert_not_called()
245 245
246 246 display.HTML('<html><p>Lots of content here</p><iframe src="http://a.com"></iframe>')
247 247 m_warn.assert_not_called()
248 248
249 249 display.HTML('<iframe src="http://a.com"></iframe>')
250 250 m_warn.assert_called_with('Consider using IPython.display.IFrame instead')
251 251
252 252 m_warn.reset_mock()
253 253 display.HTML('<IFRAME SRC="http://a.com"></IFRAME>')
254 254 m_warn.assert_called_with('Consider using IPython.display.IFrame instead')
255 255
256 256 def test_progress():
257 257 p = display.ProgressBar(10)
258 258 nt.assert_in('0/10',repr(p))
259 259 p.html_width = '100%'
260 260 p.progress = 5
261 261 nt.assert_equal(p._repr_html_(), "<progress style='width:100%' max='10' value='5'></progress>")
262 262
263 263 def test_progress_iter():
264 264 with capture_output(display=False) as captured:
265 265 for i in display.ProgressBar(5):
266 266 out = captured.stdout
267 267 nt.assert_in('{0}/5'.format(i), out)
268 268 out = captured.stdout
269 269 nt.assert_in('5/5', out)
270 270
271 271 def test_json():
272 272 d = {'a': 5}
273 273 lis = [d]
274 274 metadata = [
275 275 {'expanded': False, 'root': 'root'},
276 276 {'expanded': True, 'root': 'root'},
277 277 {'expanded': False, 'root': 'custom'},
278 278 {'expanded': True, 'root': 'custom'},
279 279 ]
280 280 json_objs = [
281 281 display.JSON(d),
282 282 display.JSON(d, expanded=True),
283 283 display.JSON(d, root='custom'),
284 284 display.JSON(d, expanded=True, root='custom'),
285 285 ]
286 286 for j, md in zip(json_objs, metadata):
287 287 nt.assert_equal(j._repr_json_(), (d, md))
288 288
289 289 with warnings.catch_warnings(record=True) as w:
290 290 warnings.simplefilter("always")
291 291 j = display.JSON(json.dumps(d))
292 292 nt.assert_equal(len(w), 1)
293 293 nt.assert_equal(j._repr_json_(), (d, metadata[0]))
294 294
295 295 json_objs = [
296 296 display.JSON(lis),
297 297 display.JSON(lis, expanded=True),
298 298 display.JSON(lis, root='custom'),
299 299 display.JSON(lis, expanded=True, root='custom'),
300 300 ]
301 301 for j, md in zip(json_objs, metadata):
302 302 nt.assert_equal(j._repr_json_(), (lis, md))
303 303
304 304 with warnings.catch_warnings(record=True) as w:
305 305 warnings.simplefilter("always")
306 306 j = display.JSON(json.dumps(lis))
307 307 nt.assert_equal(len(w), 1)
308 308 nt.assert_equal(j._repr_json_(), (lis, metadata[0]))
309 309
310 310 def test_video_embedding():
311 311 """use a tempfile, with dummy-data, to ensure that video embedding doesn't crash"""
312 312 v = display.Video("http://ignored")
313 313 assert not v.embed
314 314 html = v._repr_html_()
315 315 nt.assert_not_in('src="data:', html)
316 316 nt.assert_in('src="http://ignored"', html)
317 317
318 318 with nt.assert_raises(ValueError):
319 319 v = display.Video(b'abc')
320 320
321 321 with NamedFileInTemporaryDirectory('test.mp4') as f:
322 322 f.write(b'abc')
323 323 f.close()
324 324
325 325 v = display.Video(f.name)
326 326 assert not v.embed
327 327 html = v._repr_html_()
328 328 nt.assert_not_in('src="data:', html)
329 329
330 330 v = display.Video(f.name, embed=True)
331 331 html = v._repr_html_()
332 332 nt.assert_in('src="data:video/mp4;base64,YWJj"',html)
333 333
334 334 v = display.Video(f.name, embed=True, mimetype='video/other')
335 335 html = v._repr_html_()
336 336 nt.assert_in('src="data:video/other;base64,YWJj"',html)
337 337
338 338 v = display.Video(b'abc', embed=True, mimetype='video/mp4')
339 339 html = v._repr_html_()
340 340 nt.assert_in('src="data:video/mp4;base64,YWJj"',html)
341 341
342 342 v = display.Video(u'YWJj', embed=True, mimetype='video/xyz')
343 343 html = v._repr_html_()
344 344 nt.assert_in('src="data:video/xyz;base64,YWJj"',html)
345 345
346 346 def test_html_metadata():
347 347 s = "<h1>Test</h1>"
348 348 h = display.HTML(s, metadata={"isolated": True})
349 349 nt.assert_equal(h._repr_html_(), (s, {"isolated": True}))
350 350
351 351 def test_display_id():
352 352 ip = get_ipython()
353 353 with mock.patch.object(ip.display_pub, 'publish') as pub:
354 354 handle = display.display('x')
355 355 nt.assert_is(handle, None)
356 356 handle = display.display('y', display_id='secret')
357 357 nt.assert_is_instance(handle, display.DisplayHandle)
358 358 handle2 = display.display('z', display_id=True)
359 359 nt.assert_is_instance(handle2, display.DisplayHandle)
360 360 nt.assert_not_equal(handle.display_id, handle2.display_id)
361 361
362 362 nt.assert_equal(pub.call_count, 3)
363 363 args, kwargs = pub.call_args_list[0]
364 364 nt.assert_equal(args, ())
365 365 nt.assert_equal(kwargs, {
366 366 'data': {
367 367 'text/plain': repr('x')
368 368 },
369 369 'metadata': {},
370 370 })
371 371 args, kwargs = pub.call_args_list[1]
372 372 nt.assert_equal(args, ())
373 373 nt.assert_equal(kwargs, {
374 374 'data': {
375 375 'text/plain': repr('y')
376 376 },
377 377 'metadata': {},
378 378 'transient': {
379 379 'display_id': handle.display_id,
380 380 },
381 381 })
382 382 args, kwargs = pub.call_args_list[2]
383 383 nt.assert_equal(args, ())
384 384 nt.assert_equal(kwargs, {
385 385 'data': {
386 386 'text/plain': repr('z')
387 387 },
388 388 'metadata': {},
389 389 'transient': {
390 390 'display_id': handle2.display_id,
391 391 },
392 392 })
393 393
394 394
395 395 def test_update_display():
396 396 ip = get_ipython()
397 397 with mock.patch.object(ip.display_pub, 'publish') as pub:
398 398 with nt.assert_raises(TypeError):
399 399 display.update_display('x')
400 400 display.update_display('x', display_id='1')
401 401 display.update_display('y', display_id='2')
402 402 args, kwargs = pub.call_args_list[0]
403 403 nt.assert_equal(args, ())
404 404 nt.assert_equal(kwargs, {
405 405 'data': {
406 406 'text/plain': repr('x')
407 407 },
408 408 'metadata': {},
409 409 'transient': {
410 410 'display_id': '1',
411 411 },
412 412 'update': True,
413 413 })
414 414 args, kwargs = pub.call_args_list[1]
415 415 nt.assert_equal(args, ())
416 416 nt.assert_equal(kwargs, {
417 417 'data': {
418 418 'text/plain': repr('y')
419 419 },
420 420 'metadata': {},
421 421 'transient': {
422 422 'display_id': '2',
423 423 },
424 424 'update': True,
425 425 })
426 426
427 427
428 428 def test_display_handle():
429 429 ip = get_ipython()
430 430 handle = display.DisplayHandle()
431 431 nt.assert_is_instance(handle.display_id, str)
432 432 handle = display.DisplayHandle('my-id')
433 433 nt.assert_equal(handle.display_id, 'my-id')
434 434 with mock.patch.object(ip.display_pub, 'publish') as pub:
435 435 handle.display('x')
436 436 handle.update('y')
437 437
438 438 args, kwargs = pub.call_args_list[0]
439 439 nt.assert_equal(args, ())
440 440 nt.assert_equal(kwargs, {
441 441 'data': {
442 442 'text/plain': repr('x')
443 443 },
444 444 'metadata': {},
445 445 'transient': {
446 446 'display_id': handle.display_id,
447 447 }
448 448 })
449 449 args, kwargs = pub.call_args_list[1]
450 450 nt.assert_equal(args, ())
451 451 nt.assert_equal(kwargs, {
452 452 'data': {
453 453 'text/plain': repr('y')
454 454 },
455 455 'metadata': {},
456 456 'transient': {
457 457 'display_id': handle.display_id,
458 458 },
459 459 'update': True,
460 460 })
461 461
462 462
463 463 def test_image_alt_tag():
464 464 """Simple test for display.Image(args, alt=x,)"""
465 465 thisurl = "http://example.com/image.png"
466 466 img = display.Image(url=thisurl, alt="an image")
467 467 nt.assert_equal(u'<img src="%s" alt="an image"/>' % (thisurl), img._repr_html_())
468 468 img = display.Image(url=thisurl, unconfined=True, alt="an image")
469 469 nt.assert_equal(
470 470 u'<img src="%s" class="unconfined" alt="an image"/>' % (thisurl),
471 471 img._repr_html_(),
472 472 )
473 473 img = display.Image(url=thisurl, alt='>"& <')
474 474 nt.assert_equal(
475 475 u'<img src="%s" alt="&gt;&quot;&amp; &lt;"/>' % (thisurl), img._repr_html_()
476 476 )
477 477
478 478 img = display.Image(url=thisurl, metadata={"alt": "an image"})
479 479 nt.assert_equal(img.alt, "an image")
480 480
481 481 here = os.path.dirname(__file__)
482 482 img = display.Image(os.path.join(here, "2x2.png"), alt="an image")
483 483 nt.assert_equal(img.alt, "an image")
484 484 _, md = img._repr_png_()
485 485 nt.assert_equal(md["alt"], "an image")
486 486
487 487
488 488 @nt.raises(FileNotFoundError)
489 489 def test_image_bad_filename_raises_proper_exception():
490 490 display.Image("/this/file/does/not/exist/")._repr_png_()
@@ -1,355 +1,355 b''
1 1 """Tests for the token-based transformers in IPython.core.inputtransformer2
2 2
3 3 Line-based transformers are the simpler ones; token-based transformers are
4 4 more complex. See test_inputtransformer2_line for tests for line-based
5 5 transformations.
6 6 """
7 7 import nose.tools as nt
8 8 import string
9 9
10 10 from IPython.core import inputtransformer2 as ipt2
11 11 from IPython.core.inputtransformer2 import make_tokens_by_line, _find_assign_op
12 12
13 13 from textwrap import dedent
14 14
15 15 MULTILINE_MAGIC = ("""\
16 16 a = f()
17 17 %foo \\
18 18 bar
19 19 g()
20 20 """.splitlines(keepends=True), (2, 0), """\
21 21 a = f()
22 22 get_ipython().run_line_magic('foo', ' bar')
23 23 g()
24 24 """.splitlines(keepends=True))
25 25
26 26 INDENTED_MAGIC = ("""\
27 27 for a in range(5):
28 28 %ls
29 29 """.splitlines(keepends=True), (2, 4), """\
30 30 for a in range(5):
31 31 get_ipython().run_line_magic('ls', '')
32 32 """.splitlines(keepends=True))
33 33
34 34 CRLF_MAGIC = ([
35 35 "a = f()\n",
36 36 "%ls\r\n",
37 37 "g()\n"
38 38 ], (2, 0), [
39 39 "a = f()\n",
40 40 "get_ipython().run_line_magic('ls', '')\n",
41 41 "g()\n"
42 42 ])
43 43
44 44 MULTILINE_MAGIC_ASSIGN = ("""\
45 45 a = f()
46 46 b = %foo \\
47 47 bar
48 48 g()
49 49 """.splitlines(keepends=True), (2, 4), """\
50 50 a = f()
51 51 b = get_ipython().run_line_magic('foo', ' bar')
52 52 g()
53 53 """.splitlines(keepends=True))
54 54
55 55 MULTILINE_SYSTEM_ASSIGN = ("""\
56 56 a = f()
57 57 b = !foo \\
58 58 bar
59 59 g()
60 60 """.splitlines(keepends=True), (2, 4), """\
61 61 a = f()
62 62 b = get_ipython().getoutput('foo bar')
63 63 g()
64 64 """.splitlines(keepends=True))
65 65
66 66 #####
67 67
68 68 MULTILINE_SYSTEM_ASSIGN_AFTER_DEDENT = ("""\
69 69 def test():
70 70 for i in range(1):
71 71 print(i)
72 72 res =! ls
73 73 """.splitlines(keepends=True), (4, 7), '''\
74 74 def test():
75 75 for i in range(1):
76 76 print(i)
77 77 res =get_ipython().getoutput(\' ls\')
78 78 '''.splitlines(keepends=True))
79 79
80 80 ######
81 81
82 82 AUTOCALL_QUOTE = (
83 83 [",f 1 2 3\n"], (1, 0),
84 84 ['f("1", "2", "3")\n']
85 85 )
86 86
87 87 AUTOCALL_QUOTE2 = (
88 88 [";f 1 2 3\n"], (1, 0),
89 89 ['f("1 2 3")\n']
90 90 )
91 91
92 92 AUTOCALL_PAREN = (
93 93 ["/f 1 2 3\n"], (1, 0),
94 94 ['f(1, 2, 3)\n']
95 95 )
96 96
97 97 SIMPLE_HELP = (
98 98 ["foo?\n"], (1, 0),
99 99 ["get_ipython().run_line_magic('pinfo', 'foo')\n"]
100 100 )
101 101
102 102 DETAILED_HELP = (
103 103 ["foo??\n"], (1, 0),
104 104 ["get_ipython().run_line_magic('pinfo2', 'foo')\n"]
105 105 )
106 106
107 107 MAGIC_HELP = (
108 108 ["%foo?\n"], (1, 0),
109 109 ["get_ipython().run_line_magic('pinfo', '%foo')\n"]
110 110 )
111 111
112 112 HELP_IN_EXPR = (
113 113 ["a = b + c?\n"], (1, 0),
114 114 ["get_ipython().set_next_input('a = b + c');"
115 115 "get_ipython().run_line_magic('pinfo', 'c')\n"]
116 116 )
117 117
118 118 HELP_CONTINUED_LINE = ("""\
119 119 a = \\
120 120 zip?
121 121 """.splitlines(keepends=True), (1, 0),
122 122 [r"get_ipython().set_next_input('a = \\\nzip');get_ipython().run_line_magic('pinfo', 'zip')" + "\n"]
123 123 )
124 124
125 125 HELP_MULTILINE = ("""\
126 126 (a,
127 127 b) = zip?
128 128 """.splitlines(keepends=True), (1, 0),
129 129 [r"get_ipython().set_next_input('(a,\nb) = zip');get_ipython().run_line_magic('pinfo', 'zip')" + "\n"]
130 130 )
131 131
132 132 HELP_UNICODE = (
133 133 ["π.foo?\n"], (1, 0),
134 134 ["get_ipython().run_line_magic('pinfo', 'π.foo')\n"]
135 135 )
136 136
137 137
138 138 def null_cleanup_transformer(lines):
139 139 """
140 140 A cleanup transform that returns an empty list.
141 141 """
142 142 return []
143 143
144 144 def check_make_token_by_line_never_ends_empty():
145 145 """
146 146 Check that no sequence of single or double characters ends up leading to an empty list of tokens
147 147 """
148 148 from string import printable
149 149 for c in printable:
150 150 nt.assert_not_equal(make_tokens_by_line(c)[-1], [])
151 151 for k in printable:
152 152 nt.assert_not_equal(make_tokens_by_line(c+k)[-1], [])
153 153
154 154 def check_find(transformer, case, match=True):
155 155 sample, expected_start, _ = case
156 156 tbl = make_tokens_by_line(sample)
157 157 res = transformer.find(tbl)
158 158 if match:
159 159 # start_line is stored 0-indexed, expected values are 1-indexed
160 160 nt.assert_equal((res.start_line+1, res.start_col), expected_start)
161 161 return res
162 162 else:
163 163 nt.assert_is(res, None)
164 164
165 165 def check_transform(transformer_cls, case):
166 166 lines, start, expected = case
167 167 transformer = transformer_cls(start)
168 168 nt.assert_equal(transformer.transform(lines), expected)
169 169
170 170 def test_continued_line():
171 171 lines = MULTILINE_MAGIC_ASSIGN[0]
172 172 nt.assert_equal(ipt2.find_end_of_continued_line(lines, 1), 2)
173 173
174 174 nt.assert_equal(ipt2.assemble_continued_line(lines, (1, 5), 2), "foo bar")
175 175
176 176 def test_find_assign_magic():
177 177 check_find(ipt2.MagicAssign, MULTILINE_MAGIC_ASSIGN)
178 178 check_find(ipt2.MagicAssign, MULTILINE_SYSTEM_ASSIGN, match=False)
179 179 check_find(ipt2.MagicAssign, MULTILINE_SYSTEM_ASSIGN_AFTER_DEDENT, match=False)
180 180
181 181 def test_transform_assign_magic():
182 182 check_transform(ipt2.MagicAssign, MULTILINE_MAGIC_ASSIGN)
183 183
184 184 def test_find_assign_system():
185 185 check_find(ipt2.SystemAssign, MULTILINE_SYSTEM_ASSIGN)
186 186 check_find(ipt2.SystemAssign, MULTILINE_SYSTEM_ASSIGN_AFTER_DEDENT)
187 187 check_find(ipt2.SystemAssign, (["a = !ls\n"], (1, 5), None))
188 188 check_find(ipt2.SystemAssign, (["a=!ls\n"], (1, 2), None))
189 189 check_find(ipt2.SystemAssign, MULTILINE_MAGIC_ASSIGN, match=False)
190 190
191 191 def test_transform_assign_system():
192 192 check_transform(ipt2.SystemAssign, MULTILINE_SYSTEM_ASSIGN)
193 193 check_transform(ipt2.SystemAssign, MULTILINE_SYSTEM_ASSIGN_AFTER_DEDENT)
194 194
195 195 def test_find_magic_escape():
196 196 check_find(ipt2.EscapedCommand, MULTILINE_MAGIC)
197 197 check_find(ipt2.EscapedCommand, INDENTED_MAGIC)
198 198 check_find(ipt2.EscapedCommand, MULTILINE_MAGIC_ASSIGN, match=False)
199 199
200 200 def test_transform_magic_escape():
201 201 check_transform(ipt2.EscapedCommand, MULTILINE_MAGIC)
202 202 check_transform(ipt2.EscapedCommand, INDENTED_MAGIC)
203 203 check_transform(ipt2.EscapedCommand, CRLF_MAGIC)
204 204
205 205 def test_find_autocalls():
206 206 for case in [AUTOCALL_QUOTE, AUTOCALL_QUOTE2, AUTOCALL_PAREN]:
207 207 print("Testing %r" % case[0])
208 208 check_find(ipt2.EscapedCommand, case)
209 209
210 210 def test_transform_autocall():
211 211 for case in [AUTOCALL_QUOTE, AUTOCALL_QUOTE2, AUTOCALL_PAREN]:
212 212 print("Testing %r" % case[0])
213 213 check_transform(ipt2.EscapedCommand, case)
214 214
215 215 def test_find_help():
216 216 for case in [SIMPLE_HELP, DETAILED_HELP, MAGIC_HELP, HELP_IN_EXPR]:
217 217 check_find(ipt2.HelpEnd, case)
218 218
219 219 tf = check_find(ipt2.HelpEnd, HELP_CONTINUED_LINE)
220 220 nt.assert_equal(tf.q_line, 1)
221 221 nt.assert_equal(tf.q_col, 3)
222 222
223 223 tf = check_find(ipt2.HelpEnd, HELP_MULTILINE)
224 224 nt.assert_equal(tf.q_line, 1)
225 225 nt.assert_equal(tf.q_col, 8)
226 226
227 227 # ? in a comment does not trigger help
228 228 check_find(ipt2.HelpEnd, (["foo # bar?\n"], None, None), match=False)
229 229 # Nor in a string
230 230 check_find(ipt2.HelpEnd, (["foo = '''bar?\n"], None, None), match=False)
231 231
232 232 def test_transform_help():
233 233 tf = ipt2.HelpEnd((1, 0), (1, 9))
234 234 nt.assert_equal(tf.transform(HELP_IN_EXPR[0]), HELP_IN_EXPR[2])
235 235
236 236 tf = ipt2.HelpEnd((1, 0), (2, 3))
237 237 nt.assert_equal(tf.transform(HELP_CONTINUED_LINE[0]), HELP_CONTINUED_LINE[2])
238 238
239 239 tf = ipt2.HelpEnd((1, 0), (2, 8))
240 240 nt.assert_equal(tf.transform(HELP_MULTILINE[0]), HELP_MULTILINE[2])
241 241
242 242 tf = ipt2.HelpEnd((1, 0), (1, 0))
243 243 nt.assert_equal(tf.transform(HELP_UNICODE[0]), HELP_UNICODE[2])
244 244
245 245 def test_find_assign_op_dedent():
246 246 """
247 247 be careful that empty tokens like dedent are not counted as parens
248 248 """
249 249 class Tk:
250 250 def __init__(self, s):
251 251 self.string = s
252 252
253 253 nt.assert_equal(_find_assign_op([Tk(s) for s in ('','a','=','b')]), 2)
254 254 nt.assert_equal(_find_assign_op([Tk(s) for s in ('','(', 'a','=','b', ')', '=' ,'5')]), 6)
255 255
256 256 def test_check_complete():
257 257 cc = ipt2.TransformerManager().check_complete
258 258 nt.assert_equal(cc("a = 1"), ("complete", None))
259 259 nt.assert_equal(cc("for a in range(5):"), ("incomplete", 4))
260 260 nt.assert_equal(cc("for a in range(5):\n if a > 0:"), ("incomplete", 8))
261 261 nt.assert_equal(cc("raise = 2"), ("invalid", None))
262 262 nt.assert_equal(cc("a = [1,\n2,"), ("incomplete", 0))
263 263 nt.assert_equal(cc("(\n))"), ("incomplete", 0))
264 264 nt.assert_equal(cc("\\\r\n"), ("incomplete", 0))
265 265 nt.assert_equal(cc("a = '''\n hi"), ("incomplete", 3))
266 266 nt.assert_equal(cc("def a():\n x=1\n global x"), ("invalid", None))
267 267 nt.assert_equal(cc("a \\ "), ("invalid", None)) # Nothing allowed after backslash
268 268 nt.assert_equal(cc("1\\\n+2"), ("complete", None))
269 269 nt.assert_equal(cc("exit"), ("complete", None))
270 270
271 271 example = dedent("""
272 272 if True:
273 273 a=1""" )
274 274
275 275 nt.assert_equal(cc(example), ('incomplete', 4))
276 276 nt.assert_equal(cc(example+'\n'), ('complete', None))
277 277 nt.assert_equal(cc(example+'\n '), ('complete', None))
278 278
279 279 # no need to loop on all the letters/numbers.
280 280 short = '12abAB'+string.printable[62:]
281 281 for c in short:
282 282 # test does not raise:
283 283 cc(c)
284 284 for k in short:
285 285 cc(c+k)
286 286
287 287 nt.assert_equal(cc("def f():\n x=0\n \\\n "), ('incomplete', 2))
288 288
289 289 def test_check_complete_II():
290 290 """
291 291 Test that multiple line strings are properly handled.
292 292
293 293 Separate test function for convenience
294 294
295 295 """
296 296 cc = ipt2.TransformerManager().check_complete
297 297 nt.assert_equal(cc('''def foo():\n """'''), ('incomplete', 4))
298 298
299 299
300 300 def test_check_complete_invalidates_sunken_brackets():
301 301 """
302 302 Test that a single line with more closing brackets than the opening ones is
303 interpretted as invalid
303 interpreted as invalid
304 304 """
305 305 cc = ipt2.TransformerManager().check_complete
306 306 nt.assert_equal(cc(")"), ("invalid", None))
307 307 nt.assert_equal(cc("]"), ("invalid", None))
308 308 nt.assert_equal(cc("}"), ("invalid", None))
309 309 nt.assert_equal(cc(")("), ("invalid", None))
310 310 nt.assert_equal(cc("]["), ("invalid", None))
311 311 nt.assert_equal(cc("}{"), ("invalid", None))
312 312 nt.assert_equal(cc("]()("), ("invalid", None))
313 313 nt.assert_equal(cc("())("), ("invalid", None))
314 314 nt.assert_equal(cc(")[]("), ("invalid", None))
315 315 nt.assert_equal(cc("()]("), ("invalid", None))
316 316
317 317
318 318 def test_null_cleanup_transformer():
319 319 manager = ipt2.TransformerManager()
320 320 manager.cleanup_transforms.insert(0, null_cleanup_transformer)
321 321 assert manager.transform_cell("") == ""
322 322
323 323
324 324
325 325
326 326 def test_side_effects_I():
327 327 count = 0
328 328 def counter(lines):
329 329 nonlocal count
330 330 count += 1
331 331 return lines
332 332
333 333 counter.has_side_effects = True
334 334
335 335 manager = ipt2.TransformerManager()
336 336 manager.cleanup_transforms.insert(0, counter)
337 337 assert manager.check_complete("a=1\n") == ('complete', None)
338 338 assert count == 0
339 339
340 340
341 341
342 342
343 343 def test_side_effects_II():
344 344 count = 0
345 345 def counter(lines):
346 346 nonlocal count
347 347 count += 1
348 348 return lines
349 349
350 350 counter.has_side_effects = True
351 351
352 352 manager = ipt2.TransformerManager()
353 353 manager.line_transforms.insert(0, counter)
354 354 assert manager.check_complete("b=1\n") == ('complete', None)
355 355 assert count == 0
@@ -1,601 +1,601 b''
1 1 # encoding: utf-8
2 2 """Tests for code execution (%run and related), which is particularly tricky.
3 3
4 4 Because of how %run manages namespaces, and the fact that we are trying here to
5 5 verify subtle object deletion and reference counting issues, the %run tests
6 6 will be kept in this separate file. This makes it easier to aggregate in one
7 7 place the tricks needed to handle it; most other magics are much easier to test
8 8 and we do so in a common test_magic file.
9 9
10 10 Note that any test using `run -i` should make sure to do a `reset` afterwards,
11 11 as otherwise it may influence later tests.
12 12 """
13 13
14 14 # Copyright (c) IPython Development Team.
15 15 # Distributed under the terms of the Modified BSD License.
16 16
17 17
18 18
19 19 import functools
20 20 import os
21 21 from os.path import join as pjoin
22 22 import random
23 23 import string
24 24 import sys
25 25 import textwrap
26 26 import unittest
27 27 from unittest.mock import patch
28 28
29 29 import nose.tools as nt
30 30 from nose import SkipTest
31 31
32 32 from IPython.testing import decorators as dec
33 33 from IPython.testing import tools as tt
34 34 from IPython.utils.io import capture_output
35 35 from IPython.utils.tempdir import TemporaryDirectory
36 36 from IPython.core import debugger
37 37
38 38 def doctest_refbug():
39 39 """Very nasty problem with references held by multiple runs of a script.
40 40 See: https://github.com/ipython/ipython/issues/141
41 41
42 42 In [1]: _ip.clear_main_mod_cache()
43 43 # random
44 44
45 45 In [2]: %run refbug
46 46
47 47 In [3]: call_f()
48 48 lowercased: hello
49 49
50 50 In [4]: %run refbug
51 51
52 52 In [5]: call_f()
53 53 lowercased: hello
54 54 lowercased: hello
55 55 """
56 56
57 57
58 58 def doctest_run_builtins():
59 59 r"""Check that %run doesn't damage __builtins__.
60 60
61 61 In [1]: import tempfile
62 62
63 63 In [2]: bid1 = id(__builtins__)
64 64
65 65 In [3]: fname = tempfile.mkstemp('.py')[1]
66 66
67 67 In [3]: f = open(fname,'w')
68 68
69 69 In [4]: dummy= f.write('pass\n')
70 70
71 71 In [5]: f.flush()
72 72
73 73 In [6]: t1 = type(__builtins__)
74 74
75 75 In [7]: %run $fname
76 76
77 77 In [7]: f.close()
78 78
79 79 In [8]: bid2 = id(__builtins__)
80 80
81 81 In [9]: t2 = type(__builtins__)
82 82
83 83 In [10]: t1 == t2
84 84 Out[10]: True
85 85
86 86 In [10]: bid1 == bid2
87 87 Out[10]: True
88 88
89 89 In [12]: try:
90 90 ....: os.unlink(fname)
91 91 ....: except:
92 92 ....: pass
93 93 ....:
94 94 """
95 95
96 96
97 97 def doctest_run_option_parser():
98 98 r"""Test option parser in %run.
99 99
100 100 In [1]: %run print_argv.py
101 101 []
102 102
103 103 In [2]: %run print_argv.py print*.py
104 104 ['print_argv.py']
105 105
106 106 In [3]: %run -G print_argv.py print*.py
107 107 ['print*.py']
108 108
109 109 """
110 110
111 111
112 112 @dec.skip_win32
113 113 def doctest_run_option_parser_for_posix():
114 114 r"""Test option parser in %run (Linux/OSX specific).
115 115
116 116 You need a double backslash to escape the glob in POSIX systems:
117 117
118 118 In [1]: %run print_argv.py print\\*.py
119 119 ['print*.py']
120 120
121 121 You can't use quote to escape glob in POSIX systems:
122 122
123 123 In [2]: %run print_argv.py 'print*.py'
124 124 ['print_argv.py']
125 125
126 126 """
127 127
128 128
129 129 @dec.skip_if_not_win32
130 130 def doctest_run_option_parser_for_windows():
131 131 r"""Test option parser in %run (Windows specific).
132 132
133 133 In Windows, you can't escape ``*`` by backslash:
134 134
135 135 In [1]: %run print_argv.py print\\*.py
136 136 ['print\\*.py']
137 137
138 138 You can use quote to escape glob:
139 139
140 140 In [2]: %run print_argv.py 'print*.py'
141 141 ['print*.py']
142 142
143 143 """
144 144
145 145
146 146 def doctest_reset_del():
147 147 """Test that resetting doesn't cause errors in __del__ methods.
148 148
149 149 In [2]: class A(object):
150 150 ...: def __del__(self):
151 151 ...: print(str("Hi"))
152 152 ...:
153 153
154 154 In [3]: a = A()
155 155
156 156 In [4]: get_ipython().reset()
157 157 Hi
158 158
159 159 In [5]: 1+1
160 160 Out[5]: 2
161 161 """
162 162
163 163 # For some tests, it will be handy to organize them in a class with a common
164 164 # setup that makes a temp file
165 165
166 166 class TestMagicRunPass(tt.TempFileMixin):
167 167
168 168 def setUp(self):
169 169 content = "a = [1,2,3]\nb = 1"
170 170 self.mktmp(content)
171 171
172 172 def run_tmpfile(self):
173 173 _ip = get_ipython()
174 174 # This fails on Windows if self.tmpfile.name has spaces or "~" in it.
175 175 # See below and ticket https://bugs.launchpad.net/bugs/366353
176 176 _ip.magic('run %s' % self.fname)
177 177
178 178 def run_tmpfile_p(self):
179 179 _ip = get_ipython()
180 180 # This fails on Windows if self.tmpfile.name has spaces or "~" in it.
181 181 # See below and ticket https://bugs.launchpad.net/bugs/366353
182 182 _ip.magic('run -p %s' % self.fname)
183 183
184 184 def test_builtins_id(self):
185 185 """Check that %run doesn't damage __builtins__ """
186 186 _ip = get_ipython()
187 187 # Test that the id of __builtins__ is not modified by %run
188 188 bid1 = id(_ip.user_ns['__builtins__'])
189 189 self.run_tmpfile()
190 190 bid2 = id(_ip.user_ns['__builtins__'])
191 191 nt.assert_equal(bid1, bid2)
192 192
193 193 def test_builtins_type(self):
194 194 """Check that the type of __builtins__ doesn't change with %run.
195 195
196 196 However, the above could pass if __builtins__ was already modified to
197 197 be a dict (it should be a module) by a previous use of %run. So we
198 198 also check explicitly that it really is a module:
199 199 """
200 200 _ip = get_ipython()
201 201 self.run_tmpfile()
202 202 nt.assert_equal(type(_ip.user_ns['__builtins__']),type(sys))
203 203
204 204 def test_run_profile( self ):
205 205 """Test that the option -p, which invokes the profiler, do not
206 206 crash by invoking execfile"""
207 207 self.run_tmpfile_p()
208 208
209 209 def test_run_debug_twice(self):
210 210 # https://github.com/ipython/ipython/issues/10028
211 211 _ip = get_ipython()
212 212 with tt.fake_input(['c']):
213 213 _ip.magic('run -d %s' % self.fname)
214 214 with tt.fake_input(['c']):
215 215 _ip.magic('run -d %s' % self.fname)
216 216
217 217 def test_run_debug_twice_with_breakpoint(self):
218 218 """Make a valid python temp file."""
219 219 _ip = get_ipython()
220 220 with tt.fake_input(['b 2', 'c', 'c']):
221 221 _ip.magic('run -d %s' % self.fname)
222 222
223 223 with tt.fake_input(['c']):
224 224 with tt.AssertNotPrints('KeyError'):
225 225 _ip.magic('run -d %s' % self.fname)
226 226
227 227
228 228 class TestMagicRunSimple(tt.TempFileMixin):
229 229
230 230 def test_simpledef(self):
231 231 """Test that simple class definitions work."""
232 232 src = ("class foo: pass\n"
233 233 "def f(): return foo()")
234 234 self.mktmp(src)
235 235 _ip.magic('run %s' % self.fname)
236 236 _ip.run_cell('t = isinstance(f(), foo)')
237 237 nt.assert_true(_ip.user_ns['t'])
238 238
239 239 def test_obj_del(self):
240 240 """Test that object's __del__ methods are called on exit."""
241 241 if sys.platform == 'win32':
242 242 try:
243 243 import win32api
244 244 except ImportError as e:
245 245 raise SkipTest("Test requires pywin32") from e
246 246 src = ("class A(object):\n"
247 247 " def __del__(self):\n"
248 248 " print('object A deleted')\n"
249 249 "a = A()\n")
250 250 self.mktmp(src)
251 251 err = None
252 252 tt.ipexec_validate(self.fname, 'object A deleted', err)
253 253
254 254 def test_aggressive_namespace_cleanup(self):
255 255 """Test that namespace cleanup is not too aggressive GH-238
256 256
257 257 Returning from another run magic deletes the namespace"""
258 258 # see ticket https://github.com/ipython/ipython/issues/238
259 259
260 260 with tt.TempFileMixin() as empty:
261 261 empty.mktmp('')
262 262 # On Windows, the filename will have \users in it, so we need to use the
263 263 # repr so that the \u becomes \\u.
264 264 src = ("ip = get_ipython()\n"
265 265 "for i in range(5):\n"
266 266 " try:\n"
267 267 " ip.magic(%r)\n"
268 268 " except NameError as e:\n"
269 269 " print(i)\n"
270 270 " break\n" % ('run ' + empty.fname))
271 271 self.mktmp(src)
272 272 _ip.magic('run %s' % self.fname)
273 273 _ip.run_cell('ip == get_ipython()')
274 274 nt.assert_equal(_ip.user_ns['i'], 4)
275 275
276 276 def test_run_second(self):
277 277 """Test that running a second file doesn't clobber the first, gh-3547
278 278 """
279 279 self.mktmp("avar = 1\n"
280 280 "def afunc():\n"
281 281 " return avar\n")
282 282
283 283 with tt.TempFileMixin() as empty:
284 284 empty.mktmp("")
285 285
286 286 _ip.magic('run %s' % self.fname)
287 287 _ip.magic('run %s' % empty.fname)
288 288 nt.assert_equal(_ip.user_ns['afunc'](), 1)
289 289
290 290 @dec.skip_win32
291 291 def test_tclass(self):
292 292 mydir = os.path.dirname(__file__)
293 293 tc = os.path.join(mydir, 'tclass')
294 294 src = ("%%run '%s' C-first\n"
295 295 "%%run '%s' C-second\n"
296 296 "%%run '%s' C-third\n") % (tc, tc, tc)
297 297 self.mktmp(src, '.ipy')
298 298 out = """\
299 299 ARGV 1-: ['C-first']
300 300 ARGV 1-: ['C-second']
301 301 tclass.py: deleting object: C-first
302 302 ARGV 1-: ['C-third']
303 303 tclass.py: deleting object: C-second
304 304 tclass.py: deleting object: C-third
305 305 """
306 306 err = None
307 307 tt.ipexec_validate(self.fname, out, err)
308 308
309 309 def test_run_i_after_reset(self):
310 310 """Check that %run -i still works after %reset (gh-693)"""
311 311 src = "yy = zz\n"
312 312 self.mktmp(src)
313 313 _ip.run_cell("zz = 23")
314 314 try:
315 315 _ip.magic('run -i %s' % self.fname)
316 316 nt.assert_equal(_ip.user_ns['yy'], 23)
317 317 finally:
318 318 _ip.magic('reset -f')
319 319
320 320 _ip.run_cell("zz = 23")
321 321 try:
322 322 _ip.magic('run -i %s' % self.fname)
323 323 nt.assert_equal(_ip.user_ns['yy'], 23)
324 324 finally:
325 325 _ip.magic('reset -f')
326 326
327 327 def test_unicode(self):
328 328 """Check that files in odd encodings are accepted."""
329 329 mydir = os.path.dirname(__file__)
330 330 na = os.path.join(mydir, 'nonascii.py')
331 331 _ip.magic('run "%s"' % na)
332 332 nt.assert_equal(_ip.user_ns['u'], u'Ўт№Ф')
333 333
334 334 def test_run_py_file_attribute(self):
335 335 """Test handling of `__file__` attribute in `%run <file>.py`."""
336 336 src = "t = __file__\n"
337 337 self.mktmp(src)
338 338 _missing = object()
339 339 file1 = _ip.user_ns.get('__file__', _missing)
340 340 _ip.magic('run %s' % self.fname)
341 341 file2 = _ip.user_ns.get('__file__', _missing)
342 342
343 343 # Check that __file__ was equal to the filename in the script's
344 344 # namespace.
345 345 nt.assert_equal(_ip.user_ns['t'], self.fname)
346 346
347 347 # Check that __file__ was not leaked back into user_ns.
348 348 nt.assert_equal(file1, file2)
349 349
350 350 def test_run_ipy_file_attribute(self):
351 351 """Test handling of `__file__` attribute in `%run <file.ipy>`."""
352 352 src = "t = __file__\n"
353 353 self.mktmp(src, ext='.ipy')
354 354 _missing = object()
355 355 file1 = _ip.user_ns.get('__file__', _missing)
356 356 _ip.magic('run %s' % self.fname)
357 357 file2 = _ip.user_ns.get('__file__', _missing)
358 358
359 359 # Check that __file__ was equal to the filename in the script's
360 360 # namespace.
361 361 nt.assert_equal(_ip.user_ns['t'], self.fname)
362 362
363 363 # Check that __file__ was not leaked back into user_ns.
364 364 nt.assert_equal(file1, file2)
365 365
366 366 def test_run_formatting(self):
367 367 """ Test that %run -t -N<N> does not raise a TypeError for N > 1."""
368 368 src = "pass"
369 369 self.mktmp(src)
370 370 _ip.magic('run -t -N 1 %s' % self.fname)
371 371 _ip.magic('run -t -N 10 %s' % self.fname)
372 372
373 373 def test_ignore_sys_exit(self):
374 374 """Test the -e option to ignore sys.exit()"""
375 375 src = "import sys; sys.exit(1)"
376 376 self.mktmp(src)
377 377 with tt.AssertPrints('SystemExit'):
378 378 _ip.magic('run %s' % self.fname)
379 379
380 380 with tt.AssertNotPrints('SystemExit'):
381 381 _ip.magic('run -e %s' % self.fname)
382 382
383 383 def test_run_nb(self):
384 384 """Test %run notebook.ipynb"""
385 385 from nbformat import v4, writes
386 386 nb = v4.new_notebook(
387 387 cells=[
388 388 v4.new_markdown_cell("The Ultimate Question of Everything"),
389 389 v4.new_code_cell("answer=42")
390 390 ]
391 391 )
392 392 src = writes(nb, version=4)
393 393 self.mktmp(src, ext='.ipynb')
394 394
395 395 _ip.magic("run %s" % self.fname)
396 396
397 397 nt.assert_equal(_ip.user_ns['answer'], 42)
398 398
399 399 def test_run_nb_error(self):
400 400 """Test %run notebook.ipynb error"""
401 401 from nbformat import v4, writes
402 402 # %run when a file name isn't provided
403 403 nt.assert_raises(Exception, _ip.magic, "run")
404 404
405 405 # %run when a file doesn't exist
406 406 nt.assert_raises(Exception, _ip.magic, "run foobar.ipynb")
407 407
408 408 # %run on a notebook with an error
409 409 nb = v4.new_notebook(
410 410 cells=[
411 411 v4.new_code_cell("0/0")
412 412 ]
413 413 )
414 414 src = writes(nb, version=4)
415 415 self.mktmp(src, ext='.ipynb')
416 416 nt.assert_raises(Exception, _ip.magic, "run %s" % self.fname)
417 417
418 418 def test_file_options(self):
419 419 src = ('import sys\n'
420 420 'a = " ".join(sys.argv[1:])\n')
421 421 self.mktmp(src)
422 422 test_opts = '-x 3 --verbose'
423 423 _ip.run_line_magic("run", '{0} {1}'.format(self.fname, test_opts))
424 424 nt.assert_equal(_ip.user_ns['a'], test_opts)
425 425
426 426
427 427 class TestMagicRunWithPackage(unittest.TestCase):
428 428
429 429 def writefile(self, name, content):
430 430 path = os.path.join(self.tempdir.name, name)
431 431 d = os.path.dirname(path)
432 432 if not os.path.isdir(d):
433 433 os.makedirs(d)
434 434 with open(path, 'w') as f:
435 435 f.write(textwrap.dedent(content))
436 436
437 437 def setUp(self):
438 438 self.package = package = 'tmp{0}'.format(''.join([random.choice(string.ascii_letters) for i in range(10)]))
439 439 """Temporary (probably) valid python package name."""
440 440
441 441 self.value = int(random.random() * 10000)
442 442
443 443 self.tempdir = TemporaryDirectory()
444 444 self.__orig_cwd = os.getcwd()
445 445 sys.path.insert(0, self.tempdir.name)
446 446
447 447 self.writefile(os.path.join(package, '__init__.py'), '')
448 448 self.writefile(os.path.join(package, 'sub.py'), """
449 449 x = {0!r}
450 450 """.format(self.value))
451 451 self.writefile(os.path.join(package, 'relative.py'), """
452 452 from .sub import x
453 453 """)
454 454 self.writefile(os.path.join(package, 'absolute.py'), """
455 455 from {0}.sub import x
456 456 """.format(package))
457 457 self.writefile(os.path.join(package, 'args.py'), """
458 458 import sys
459 459 a = " ".join(sys.argv[1:])
460 460 """.format(package))
461 461
462 462 def tearDown(self):
463 463 os.chdir(self.__orig_cwd)
464 464 sys.path[:] = [p for p in sys.path if p != self.tempdir.name]
465 465 self.tempdir.cleanup()
466 466
467 467 def check_run_submodule(self, submodule, opts=''):
468 468 _ip.user_ns.pop('x', None)
469 469 _ip.magic('run {2} -m {0}.{1}'.format(self.package, submodule, opts))
470 470 self.assertEqual(_ip.user_ns['x'], self.value,
471 471 'Variable `x` is not loaded from module `{0}`.'
472 472 .format(submodule))
473 473
474 474 def test_run_submodule_with_absolute_import(self):
475 475 self.check_run_submodule('absolute')
476 476
477 477 def test_run_submodule_with_relative_import(self):
478 478 """Run submodule that has a relative import statement (#2727)."""
479 479 self.check_run_submodule('relative')
480 480
481 481 def test_prun_submodule_with_absolute_import(self):
482 482 self.check_run_submodule('absolute', '-p')
483 483
484 484 def test_prun_submodule_with_relative_import(self):
485 485 self.check_run_submodule('relative', '-p')
486 486
487 487 def with_fake_debugger(func):
488 488 @functools.wraps(func)
489 489 def wrapper(*args, **kwds):
490 490 with patch.object(debugger.Pdb, 'run', staticmethod(eval)):
491 491 return func(*args, **kwds)
492 492 return wrapper
493 493
494 494 @with_fake_debugger
495 495 def test_debug_run_submodule_with_absolute_import(self):
496 496 self.check_run_submodule('absolute', '-d')
497 497
498 498 @with_fake_debugger
499 499 def test_debug_run_submodule_with_relative_import(self):
500 500 self.check_run_submodule('relative', '-d')
501 501
502 502 def test_module_options(self):
503 503 _ip.user_ns.pop('a', None)
504 504 test_opts = '-x abc -m test'
505 505 _ip.run_line_magic('run', '-m {0}.args {1}'.format(self.package, test_opts))
506 506 nt.assert_equal(_ip.user_ns['a'], test_opts)
507 507
508 508 def test_module_options_with_separator(self):
509 509 _ip.user_ns.pop('a', None)
510 510 test_opts = '-x abc -m test'
511 511 _ip.run_line_magic('run', '-m {0}.args -- {1}'.format(self.package, test_opts))
512 512 nt.assert_equal(_ip.user_ns['a'], test_opts)
513 513
514 514 def test_run__name__():
515 515 with TemporaryDirectory() as td:
516 516 path = pjoin(td, 'foo.py')
517 517 with open(path, 'w') as f:
518 518 f.write("q = __name__")
519 519
520 520 _ip.user_ns.pop('q', None)
521 521 _ip.magic('run {}'.format(path))
522 522 nt.assert_equal(_ip.user_ns.pop('q'), '__main__')
523 523
524 524 _ip.magic('run -n {}'.format(path))
525 525 nt.assert_equal(_ip.user_ns.pop('q'), 'foo')
526 526
527 527 try:
528 528 _ip.magic('run -i -n {}'.format(path))
529 529 nt.assert_equal(_ip.user_ns.pop('q'), 'foo')
530 530 finally:
531 531 _ip.magic('reset -f')
532 532
533 533
534 534 def test_run_tb():
535 535 """Test traceback offset in %run"""
536 536 with TemporaryDirectory() as td:
537 537 path = pjoin(td, 'foo.py')
538 538 with open(path, 'w') as f:
539 539 f.write('\n'.join([
540 540 "def foo():",
541 541 " return bar()",
542 542 "def bar():",
543 543 " raise RuntimeError('hello!')",
544 544 "foo()",
545 545 ]))
546 546 with capture_output() as io:
547 547 _ip.magic('run {}'.format(path))
548 548 out = io.stdout
549 549 nt.assert_not_in("execfile", out)
550 550 nt.assert_in("RuntimeError", out)
551 551 nt.assert_equal(out.count("---->"), 3)
552 552 del ip.user_ns['bar']
553 553 del ip.user_ns['foo']
554 554
555 555
556 556 def test_multiprocessing_run():
557 557 """Set we can run mutiprocesgin without messing up up main namespace
558 558
559 559 Note that importing `nose.tools as nt` modifies the value of
560 sys.module['__mp_main__'] so wee need to temporarily set it to None to test
560 sys.module['__mp_main__'] so we need to temporarily set it to None to test
561 561 the issue.
562 562 """
563 563 with TemporaryDirectory() as td:
564 564 mpm = sys.modules.get('__mp_main__')
565 565 assert mpm is not None
566 566 sys.modules['__mp_main__'] = None
567 567 try:
568 568 path = pjoin(td, 'test.py')
569 569 with open(path, 'w') as f:
570 570 f.write("import multiprocessing\nprint('hoy')")
571 571 with capture_output() as io:
572 572 _ip.run_line_magic('run', path)
573 573 _ip.run_cell("i_m_undefined")
574 574 out = io.stdout
575 575 nt.assert_in("hoy", out)
576 576 nt.assert_not_in("AttributeError", out)
577 577 nt.assert_in("NameError", out)
578 578 nt.assert_equal(out.count("---->"), 1)
579 579 except:
580 580 raise
581 581 finally:
582 582 sys.modules['__mp_main__'] = mpm
583 583
584 584 @dec.knownfailureif(sys.platform == 'win32', "writes to io.stdout aren't captured on Windows")
585 585 def test_script_tb():
586 586 """Test traceback offset in `ipython script.py`"""
587 587 with TemporaryDirectory() as td:
588 588 path = pjoin(td, 'foo.py')
589 589 with open(path, 'w') as f:
590 590 f.write('\n'.join([
591 591 "def foo():",
592 592 " return bar()",
593 593 "def bar():",
594 594 " raise RuntimeError('hello!')",
595 595 "foo()",
596 596 ]))
597 597 out, err = tt.ipexec(path)
598 598 nt.assert_not_in("execfile", out)
599 599 nt.assert_in("RuntimeError", out)
600 600 nt.assert_equal(out.count("---->"), 3)
601 601
@@ -1,555 +1,555 b''
1 1 """Tests for autoreload extension.
2 2 """
3 3 # -----------------------------------------------------------------------------
4 4 # Copyright (c) 2012 IPython Development Team.
5 5 #
6 6 # Distributed under the terms of the Modified BSD License.
7 7 #
8 8 # The full license is in the file COPYING.txt, distributed with this software.
9 9 # -----------------------------------------------------------------------------
10 10
11 11 # -----------------------------------------------------------------------------
12 12 # Imports
13 13 # -----------------------------------------------------------------------------
14 14
15 15 import os
16 16 import sys
17 17 import tempfile
18 18 import textwrap
19 19 import shutil
20 20 import random
21 21 import time
22 22 from io import StringIO
23 23
24 24 import nose.tools as nt
25 25 import IPython.testing.tools as tt
26 26
27 27 from unittest import TestCase
28 28
29 29 from IPython.extensions.autoreload import AutoreloadMagics
30 30 from IPython.core.events import EventManager, pre_run_cell
31 31
32 32 # -----------------------------------------------------------------------------
33 33 # Test fixture
34 34 # -----------------------------------------------------------------------------
35 35
36 36 noop = lambda *a, **kw: None
37 37
38 38
39 39 class FakeShell:
40 40 def __init__(self):
41 41 self.ns = {}
42 42 self.user_ns = self.ns
43 43 self.user_ns_hidden = {}
44 44 self.events = EventManager(self, {"pre_run_cell", pre_run_cell})
45 45 self.auto_magics = AutoreloadMagics(shell=self)
46 46 self.events.register("pre_run_cell", self.auto_magics.pre_run_cell)
47 47
48 48 register_magics = set_hook = noop
49 49
50 50 def run_code(self, code):
51 51 self.events.trigger("pre_run_cell")
52 52 exec(code, self.user_ns)
53 53 self.auto_magics.post_execute_hook()
54 54
55 55 def push(self, items):
56 56 self.ns.update(items)
57 57
58 58 def magic_autoreload(self, parameter):
59 59 self.auto_magics.autoreload(parameter)
60 60
61 61 def magic_aimport(self, parameter, stream=None):
62 62 self.auto_magics.aimport(parameter, stream=stream)
63 63 self.auto_magics.post_execute_hook()
64 64
65 65
66 66 class Fixture(TestCase):
67 67 """Fixture for creating test module files"""
68 68
69 69 test_dir = None
70 70 old_sys_path = None
71 71 filename_chars = "abcdefghijklmnopqrstuvwxyz0123456789"
72 72
73 73 def setUp(self):
74 74 self.test_dir = tempfile.mkdtemp()
75 75 self.old_sys_path = list(sys.path)
76 76 sys.path.insert(0, self.test_dir)
77 77 self.shell = FakeShell()
78 78
79 79 def tearDown(self):
80 80 shutil.rmtree(self.test_dir)
81 81 sys.path = self.old_sys_path
82 82
83 83 self.test_dir = None
84 84 self.old_sys_path = None
85 85 self.shell = None
86 86
87 87 def get_module(self):
88 88 module_name = "tmpmod_" + "".join(random.sample(self.filename_chars, 20))
89 89 if module_name in sys.modules:
90 90 del sys.modules[module_name]
91 91 file_name = os.path.join(self.test_dir, module_name + ".py")
92 92 return module_name, file_name
93 93
94 94 def write_file(self, filename, content):
95 95 """
96 96 Write a file, and force a timestamp difference of at least one second
97 97
98 98 Notes
99 99 -----
100 100 Python's .pyc files record the timestamp of their compilation
101 101 with a time resolution of one second.
102 102
103 103 Therefore, we need to force a timestamp difference between .py
104 104 and .pyc, without having the .py file be timestamped in the
105 105 future, and without changing the timestamp of the .pyc file
106 106 (because that is stored in the file). The only reliable way
107 107 to achieve this seems to be to sleep.
108 108 """
109 109 content = textwrap.dedent(content)
110 110 # Sleep one second + eps
111 111 time.sleep(1.05)
112 112
113 113 # Write
114 114 with open(filename, "w") as f:
115 115 f.write(content)
116 116
117 117 def new_module(self, code):
118 118 code = textwrap.dedent(code)
119 119 mod_name, mod_fn = self.get_module()
120 120 with open(mod_fn, "w") as f:
121 121 f.write(code)
122 122 return mod_name, mod_fn
123 123
124 124
125 125 # -----------------------------------------------------------------------------
126 126 # Test automatic reloading
127 127 # -----------------------------------------------------------------------------
128 128
129 129
130 130 def pickle_get_current_class(obj):
131 131 """
132 132 Original issue comes from pickle; hence the name.
133 133 """
134 134 name = obj.__class__.__name__
135 135 module_name = getattr(obj, "__module__", None)
136 136 obj2 = sys.modules[module_name]
137 137 for subpath in name.split("."):
138 138 obj2 = getattr(obj2, subpath)
139 139 return obj2
140 140
141 141
142 142 class TestAutoreload(Fixture):
143 143 def test_reload_enums(self):
144 144 mod_name, mod_fn = self.new_module(
145 145 textwrap.dedent(
146 146 """
147 147 from enum import Enum
148 148 class MyEnum(Enum):
149 149 A = 'A'
150 150 B = 'B'
151 151 """
152 152 )
153 153 )
154 154 self.shell.magic_autoreload("2")
155 155 self.shell.magic_aimport(mod_name)
156 156 self.write_file(
157 157 mod_fn,
158 158 textwrap.dedent(
159 159 """
160 160 from enum import Enum
161 161 class MyEnum(Enum):
162 162 A = 'A'
163 163 B = 'B'
164 164 C = 'C'
165 165 """
166 166 ),
167 167 )
168 168 with tt.AssertNotPrints(
169 169 ("[autoreload of %s failed:" % mod_name), channel="stderr"
170 170 ):
171 171 self.shell.run_code("pass") # trigger another reload
172 172
173 173 def test_reload_class_type(self):
174 174 self.shell.magic_autoreload("2")
175 175 mod_name, mod_fn = self.new_module(
176 176 """
177 177 class Test():
178 178 def meth(self):
179 179 return "old"
180 180 """
181 181 )
182 182 assert "test" not in self.shell.ns
183 183 assert "result" not in self.shell.ns
184 184
185 185 self.shell.run_code("from %s import Test" % mod_name)
186 186 self.shell.run_code("test = Test()")
187 187
188 188 self.write_file(
189 189 mod_fn,
190 190 """
191 191 class Test():
192 192 def meth(self):
193 193 return "new"
194 194 """,
195 195 )
196 196
197 197 test_object = self.shell.ns["test"]
198 198
199 199 # important to trigger autoreload logic !
200 200 self.shell.run_code("pass")
201 201
202 202 test_class = pickle_get_current_class(test_object)
203 203 assert isinstance(test_object, test_class)
204 204
205 205 # extra check.
206 206 self.shell.run_code("import pickle")
207 207 self.shell.run_code("p = pickle.dumps(test)")
208 208
209 209 def test_reload_class_attributes(self):
210 210 self.shell.magic_autoreload("2")
211 211 mod_name, mod_fn = self.new_module(
212 212 textwrap.dedent(
213 213 """
214 214 class MyClass:
215 215
216 216 def __init__(self, a=10):
217 217 self.a = a
218 218 self.b = 22
219 219 # self.toto = 33
220 220
221 221 def square(self):
222 222 print('compute square')
223 223 return self.a*self.a
224 224 """
225 225 )
226 226 )
227 227 self.shell.run_code("from %s import MyClass" % mod_name)
228 228 self.shell.run_code("first = MyClass(5)")
229 229 self.shell.run_code("first.square()")
230 230 with nt.assert_raises(AttributeError):
231 231 self.shell.run_code("first.cube()")
232 232 with nt.assert_raises(AttributeError):
233 233 self.shell.run_code("first.power(5)")
234 234 self.shell.run_code("first.b")
235 235 with nt.assert_raises(AttributeError):
236 236 self.shell.run_code("first.toto")
237 237
238 238 # remove square, add power
239 239
240 240 self.write_file(
241 241 mod_fn,
242 242 textwrap.dedent(
243 243 """
244 244 class MyClass:
245 245
246 246 def __init__(self, a=10):
247 247 self.a = a
248 248 self.b = 11
249 249
250 250 def power(self, p):
251 251 print('compute power '+str(p))
252 252 return self.a**p
253 253 """
254 254 ),
255 255 )
256 256
257 257 self.shell.run_code("second = MyClass(5)")
258 258
259 259 for object_name in {"first", "second"}:
260 260 self.shell.run_code(f"{object_name}.power(5)")
261 261 with nt.assert_raises(AttributeError):
262 262 self.shell.run_code(f"{object_name}.cube()")
263 263 with nt.assert_raises(AttributeError):
264 264 self.shell.run_code(f"{object_name}.square()")
265 265 self.shell.run_code(f"{object_name}.b")
266 266 self.shell.run_code(f"{object_name}.a")
267 267 with nt.assert_raises(AttributeError):
268 268 self.shell.run_code(f"{object_name}.toto")
269 269
270 270 def test_autoload_newly_added_objects(self):
271 271 self.shell.magic_autoreload("3")
272 272 mod_code = """
273 273 def func1(): pass
274 274 """
275 275 mod_name, mod_fn = self.new_module(textwrap.dedent(mod_code))
276 276 self.shell.run_code(f"from {mod_name} import *")
277 277 self.shell.run_code("func1()")
278 278 with nt.assert_raises(NameError):
279 279 self.shell.run_code("func2()")
280 280 with nt.assert_raises(NameError):
281 281 self.shell.run_code("t = Test()")
282 282 with nt.assert_raises(NameError):
283 283 self.shell.run_code("number")
284 284
285 285 # ----------- TEST NEW OBJ LOADED --------------------------
286 286
287 287 new_code = """
288 288 def func1(): pass
289 289 def func2(): pass
290 290 class Test: pass
291 291 number = 0
292 292 from enum import Enum
293 293 class TestEnum(Enum):
294 294 A = 'a'
295 295 """
296 296 self.write_file(mod_fn, textwrap.dedent(new_code))
297 297
298 298 # test function now exists in shell's namespace
299 299 self.shell.run_code("func2()")
300 300 # test function now exists in module's dict
301 301 self.shell.run_code(f"import sys; sys.modules['{mod_name}'].func2()")
302 302 # test class now exists
303 303 self.shell.run_code("t = Test()")
304 304 # test global built-in var now exists
305 305 self.shell.run_code("number")
306 # test the enumerations gets loaded succesfully
306 # test the enumerations gets loaded successfully
307 307 self.shell.run_code("TestEnum.A")
308 308
309 309 # ----------- TEST NEW OBJ CAN BE CHANGED --------------------
310 310
311 311 new_code = """
312 312 def func1(): return 'changed'
313 313 def func2(): return 'changed'
314 314 class Test:
315 315 def new_func(self):
316 316 return 'changed'
317 317 number = 1
318 318 from enum import Enum
319 319 class TestEnum(Enum):
320 320 A = 'a'
321 321 B = 'added'
322 322 """
323 323 self.write_file(mod_fn, textwrap.dedent(new_code))
324 324 self.shell.run_code("assert func1() == 'changed'")
325 325 self.shell.run_code("assert func2() == 'changed'")
326 326 self.shell.run_code("t = Test(); assert t.new_func() == 'changed'")
327 327 self.shell.run_code("assert number == 1")
328 328 self.shell.run_code("assert TestEnum.B.value == 'added'")
329 329
330 330 # ----------- TEST IMPORT FROM MODULE --------------------------
331 331
332 332 new_mod_code = """
333 333 from enum import Enum
334 334 class Ext(Enum):
335 335 A = 'ext'
336 336 def ext_func():
337 337 return 'ext'
338 338 class ExtTest:
339 339 def meth(self):
340 340 return 'ext'
341 341 ext_int = 2
342 342 """
343 343 new_mod_name, new_mod_fn = self.new_module(textwrap.dedent(new_mod_code))
344 344 current_mod_code = f"""
345 345 from {new_mod_name} import *
346 346 """
347 347 self.write_file(mod_fn, textwrap.dedent(current_mod_code))
348 348 self.shell.run_code("assert Ext.A.value == 'ext'")
349 349 self.shell.run_code("assert ext_func() == 'ext'")
350 350 self.shell.run_code("t = ExtTest(); assert t.meth() == 'ext'")
351 351 self.shell.run_code("assert ext_int == 2")
352 352
353 353 def _check_smoketest(self, use_aimport=True):
354 354 """
355 355 Functional test for the automatic reloader using either
356 356 '%autoreload 1' or '%autoreload 2'
357 357 """
358 358
359 359 mod_name, mod_fn = self.new_module(
360 360 """
361 361 x = 9
362 362
363 363 z = 123 # this item will be deleted
364 364
365 365 def foo(y):
366 366 return y + 3
367 367
368 368 class Baz(object):
369 369 def __init__(self, x):
370 370 self.x = x
371 371 def bar(self, y):
372 372 return self.x + y
373 373 @property
374 374 def quux(self):
375 375 return 42
376 376 def zzz(self):
377 377 '''This method will be deleted below'''
378 378 return 99
379 379
380 380 class Bar: # old-style class: weakref doesn't work for it on Python < 2.7
381 381 def foo(self):
382 382 return 1
383 383 """
384 384 )
385 385
386 386 #
387 387 # Import module, and mark for reloading
388 388 #
389 389 if use_aimport:
390 390 self.shell.magic_autoreload("1")
391 391 self.shell.magic_aimport(mod_name)
392 392 stream = StringIO()
393 393 self.shell.magic_aimport("", stream=stream)
394 394 nt.assert_in(("Modules to reload:\n%s" % mod_name), stream.getvalue())
395 395
396 396 with nt.assert_raises(ImportError):
397 397 self.shell.magic_aimport("tmpmod_as318989e89ds")
398 398 else:
399 399 self.shell.magic_autoreload("2")
400 400 self.shell.run_code("import %s" % mod_name)
401 401 stream = StringIO()
402 402 self.shell.magic_aimport("", stream=stream)
403 403 nt.assert_true(
404 404 "Modules to reload:\nall-except-skipped" in stream.getvalue()
405 405 )
406 406 nt.assert_in(mod_name, self.shell.ns)
407 407
408 408 mod = sys.modules[mod_name]
409 409
410 410 #
411 411 # Test module contents
412 412 #
413 413 old_foo = mod.foo
414 414 old_obj = mod.Baz(9)
415 415 old_obj2 = mod.Bar()
416 416
417 417 def check_module_contents():
418 418 nt.assert_equal(mod.x, 9)
419 419 nt.assert_equal(mod.z, 123)
420 420
421 421 nt.assert_equal(old_foo(0), 3)
422 422 nt.assert_equal(mod.foo(0), 3)
423 423
424 424 obj = mod.Baz(9)
425 425 nt.assert_equal(old_obj.bar(1), 10)
426 426 nt.assert_equal(obj.bar(1), 10)
427 427 nt.assert_equal(obj.quux, 42)
428 428 nt.assert_equal(obj.zzz(), 99)
429 429
430 430 obj2 = mod.Bar()
431 431 nt.assert_equal(old_obj2.foo(), 1)
432 432 nt.assert_equal(obj2.foo(), 1)
433 433
434 434 check_module_contents()
435 435
436 436 #
437 437 # Simulate a failed reload: no reload should occur and exactly
438 438 # one error message should be printed
439 439 #
440 440 self.write_file(
441 441 mod_fn,
442 442 """
443 443 a syntax error
444 444 """,
445 445 )
446 446
447 447 with tt.AssertPrints(
448 448 ("[autoreload of %s failed:" % mod_name), channel="stderr"
449 449 ):
450 450 self.shell.run_code("pass") # trigger reload
451 451 with tt.AssertNotPrints(
452 452 ("[autoreload of %s failed:" % mod_name), channel="stderr"
453 453 ):
454 454 self.shell.run_code("pass") # trigger another reload
455 455 check_module_contents()
456 456
457 457 #
458 458 # Rewrite module (this time reload should succeed)
459 459 #
460 460 self.write_file(
461 461 mod_fn,
462 462 """
463 463 x = 10
464 464
465 465 def foo(y):
466 466 return y + 4
467 467
468 468 class Baz(object):
469 469 def __init__(self, x):
470 470 self.x = x
471 471 def bar(self, y):
472 472 return self.x + y + 1
473 473 @property
474 474 def quux(self):
475 475 return 43
476 476
477 477 class Bar: # old-style class
478 478 def foo(self):
479 479 return 2
480 480 """,
481 481 )
482 482
483 483 def check_module_contents():
484 484 nt.assert_equal(mod.x, 10)
485 485 nt.assert_false(hasattr(mod, "z"))
486 486
487 487 nt.assert_equal(old_foo(0), 4) # superreload magic!
488 488 nt.assert_equal(mod.foo(0), 4)
489 489
490 490 obj = mod.Baz(9)
491 491 nt.assert_equal(old_obj.bar(1), 11) # superreload magic!
492 492 nt.assert_equal(obj.bar(1), 11)
493 493
494 494 nt.assert_equal(old_obj.quux, 43)
495 495 nt.assert_equal(obj.quux, 43)
496 496
497 497 nt.assert_false(hasattr(old_obj, "zzz"))
498 498 nt.assert_false(hasattr(obj, "zzz"))
499 499
500 500 obj2 = mod.Bar()
501 501 nt.assert_equal(old_obj2.foo(), 2)
502 502 nt.assert_equal(obj2.foo(), 2)
503 503
504 504 self.shell.run_code("pass") # trigger reload
505 505 check_module_contents()
506 506
507 507 #
508 508 # Another failure case: deleted file (shouldn't reload)
509 509 #
510 510 os.unlink(mod_fn)
511 511
512 512 self.shell.run_code("pass") # trigger reload
513 513 check_module_contents()
514 514
515 515 #
516 516 # Disable autoreload and rewrite module: no reload should occur
517 517 #
518 518 if use_aimport:
519 519 self.shell.magic_aimport("-" + mod_name)
520 520 stream = StringIO()
521 521 self.shell.magic_aimport("", stream=stream)
522 522 nt.assert_true(("Modules to skip:\n%s" % mod_name) in stream.getvalue())
523 523
524 524 # This should succeed, although no such module exists
525 525 self.shell.magic_aimport("-tmpmod_as318989e89ds")
526 526 else:
527 527 self.shell.magic_autoreload("0")
528 528
529 529 self.write_file(
530 530 mod_fn,
531 531 """
532 532 x = -99
533 533 """,
534 534 )
535 535
536 536 self.shell.run_code("pass") # trigger reload
537 537 self.shell.run_code("pass")
538 538 check_module_contents()
539 539
540 540 #
541 541 # Re-enable autoreload: reload should now occur
542 542 #
543 543 if use_aimport:
544 544 self.shell.magic_aimport(mod_name)
545 545 else:
546 546 self.shell.magic_autoreload("")
547 547
548 548 self.shell.run_code("pass") # trigger reload
549 549 nt.assert_equal(mod.x, -99)
550 550
551 551 def test_smoketest_aimport(self):
552 552 self._check_smoketest(use_aimport=True)
553 553
554 554 def test_smoketest_autoreload(self):
555 555 self._check_smoketest(use_aimport=False)
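The smoketest above drives the reloader through the test shell, but the timestamp-sensitive write/import/reload cycle it depends on can be reproduced with the standard library alone. A minimal, hedged sketch (the module name tmpmod_example is illustrative and not part of the test suite); %autoreload simply performs the final reload step implicitly before each prompt:

import importlib
import os
import sys
import tempfile
import textwrap
import time

tmp_dir = tempfile.mkdtemp()
sys.path.insert(0, tmp_dir)
mod_path = os.path.join(tmp_dir, "tmpmod_example.py")

with open(mod_path, "w") as f:
    f.write(textwrap.dedent("""
        def foo(y):
            return y + 3
    """))

import tmpmod_example
assert tmpmod_example.foo(0) == 3

# Force a timestamp difference of more than one second, for the same reason
# Fixture.write_file sleeps: .pyc files record timestamps with one-second resolution.
time.sleep(1.05)
with open(mod_path, "w") as f:
    f.write(textwrap.dedent("""
        def foo(y):
            return y + 4
    """))

importlib.reload(tmpmod_example)
assert tmpmod_example.foo(0) == 4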
@@ -1,401 +1,401 b''
1 1 """
2 2 This module contains factory functions that attempt
3 3 to return Qt submodules from the various python Qt bindings.
4 4
5 5 It also protects against double-importing Qt with different
6 6 bindings, which is unstable and likely to crash
7 7
8 8 This is used primarily by qt and qt_for_kernel, and shouldn't
9 9 be accessed directly from the outside
10 10 """
11 11 import sys
12 12 import types
13 13 from functools import partial, lru_cache
14 14 import operator
15 15
16 16 from IPython.utils.version import check_version
17 17
18 18 # ### Available APIs.
19 19 # Qt6
20 20 QT_API_PYQT6 = "pyqt6"
21 21 QT_API_PYSIDE6 = "pyside6"
22 22
23 23 # Qt5
24 24 QT_API_PYQT5 = 'pyqt5'
25 25 QT_API_PYSIDE2 = 'pyside2'
26 26
27 27 # Qt4
28 28 QT_API_PYQT = "pyqt" # Force version 2
29 29 QT_API_PYQTv1 = "pyqtv1" # Force version 1
30 30 QT_API_PYSIDE = "pyside"
31 31
32 32 QT_API_PYQT_DEFAULT = "pyqtdefault" # use system default for version 1 vs. 2
33 33
34 34 api_to_module = {
35 35 # Qt6
36 36 QT_API_PYQT6: "PyQt6",
37 37 QT_API_PYSIDE6: "PySide6",
38 38 # Qt5
39 39 QT_API_PYQT5: "PyQt5",
40 40 QT_API_PYSIDE2: "PySide2",
41 41 # Qt4
42 42 QT_API_PYSIDE: "PySide",
43 43 QT_API_PYQT: "PyQt4",
44 44 QT_API_PYQTv1: "PyQt4",
45 45 # default
46 46 QT_API_PYQT_DEFAULT: "PyQt6",
47 47 }
48 48
49 49
50 50 class ImportDenier(object):
51 51 """Import Hook that will guard against bad Qt imports
52 52 once IPython commits to a specific binding
53 53 """
54 54
55 55 def __init__(self):
56 56 self.__forbidden = set()
57 57
58 58 def forbid(self, module_name):
59 59 sys.modules.pop(module_name, None)
60 60 self.__forbidden.add(module_name)
61 61
62 62 def find_module(self, fullname, path=None):
63 63 if path:
64 64 return
65 65 if fullname in self.__forbidden:
66 66 return self
67 67
68 68 def load_module(self, fullname):
69 69 raise ImportError("""
70 70 Importing %s disabled by IPython, which has
71 71 already imported an Incompatible QT Binding: %s
72 72 """ % (fullname, loaded_api()))
73 73
74 74
75 75 ID = ImportDenier()
76 76 sys.meta_path.insert(0, ID)
77 77
78 78
79 79 def commit_api(api):
80 80 """Commit to a particular API, and trigger ImportErrors on subsequent
81 81 dangerous imports"""
82 82 modules = set(api_to_module.values())
83 83
84 84 modules.remove(api_to_module[api])
85 85 for mod in modules:
86 86 ID.forbid(mod)
87 87
88 88
89 89 def loaded_api():
90 90 """Return which API is loaded, if any
91 91
92 92 If this returns anything besides None,
93 93 importing any other Qt binding is unsafe.
94 94
95 95 Returns
96 96 -------
97 97 None, 'pyside6', 'pyqt6', 'pyside2', 'pyside', 'pyqt', 'pyqt5', 'pyqtv1'
98 98 """
99 99 if sys.modules.get("PyQt6.QtCore"):
100 100 return QT_API_PYQT6
101 101 elif sys.modules.get("PySide6.QtCore"):
102 102 return QT_API_PYSIDE6
103 103 elif sys.modules.get("PyQt5.QtCore"):
104 104 return QT_API_PYQT5
105 105 elif sys.modules.get("PySide2.QtCore"):
106 106 return QT_API_PYSIDE2
107 107 elif sys.modules.get("PyQt4.QtCore"):
108 108 if qtapi_version() == 2:
109 109 return QT_API_PYQT
110 110 else:
111 111 return QT_API_PYQTv1
112 112 elif sys.modules.get("PySide.QtCore"):
113 113 return QT_API_PYSIDE
114 114
115 115 return None
116 116
117 117
118 118 def has_binding(api):
119 119 """Safely check for PyQt4/5, PySide or PySide2, without importing submodules
120 120
121 121 Parameters
122 122 ----------
123 123 api : str [ 'pyqtv1' | 'pyqt' | 'pyqt5' | 'pyside' | 'pyside2' | 'pyqtdefault']
124 124 Which module to check for
125 125
126 126 Returns
127 127 -------
128 128 True if the relevant module appears to be importable
129 129 """
130 130 module_name = api_to_module[api]
131 131 from importlib.util import find_spec
132 132
133 133 required = ['QtCore', 'QtGui', 'QtSvg']
134 134 if api in (QT_API_PYQT5, QT_API_PYSIDE2, QT_API_PYQT6, QT_API_PYSIDE6):
135 135 # QT5 requires QtWidgets too
136 136 required.append('QtWidgets')
137 137
138 138 for submod in required:
139 139 try:
140 140 spec = find_spec('%s.%s' % (module_name, submod))
141 141 except ImportError:
142 142 # Package (e.g. PyQt5) not found
143 143 return False
144 144 else:
145 145 if spec is None:
146 146 # Submodule (e.g. PyQt5.QtCore) not found
147 147 return False
148 148
149 149 if api == QT_API_PYSIDE:
150 150 # We can also safely check PySide version
151 151 import PySide
152 152 return check_version(PySide.__version__, '1.0.3')
153 153
154 154 return True
155 155
156 156
157 157 def qtapi_version():
158 158 """Return which QString API has been set, if any
159 159
160 160 Returns
161 161 -------
162 162 The QString API version (1 or 2), or None if not set
163 163 """
164 164 try:
165 165 import sip
166 166 except ImportError:
167 167 # as of PyQt5 5.11, sip is no longer available as a top-level
168 168 # module and needs to be imported from the PyQt5 namespace
169 169 try:
170 170 from PyQt5 import sip
171 171 except ImportError:
172 172 return
173 173 try:
174 174 return sip.getapi('QString')
175 175 except ValueError:
176 176 return
177 177
178 178
179 179 def can_import(api):
180 180 """Safely query whether an API is importable, without importing it"""
181 181 if not has_binding(api):
182 182 return False
183 183
184 184 current = loaded_api()
185 185 if api == QT_API_PYQT_DEFAULT:
186 186 return current in [QT_API_PYQT6, None]
187 187 else:
188 188 return current in [api, None]
189 189
190 190
191 191 def import_pyqt4(version=2):
192 192 """
193 193 Import PyQt4
194 194
195 195 Parameters
196 196 ----------
197 197 version : 1, 2, or None
198 198 Which QString/QVariant API to use. Set to None to use the system
199 199 default
200 200
201 ImportErrors rasied within this function are non-recoverable
201 ImportErrors raised within this function are non-recoverable
202 202 """
203 203 # The new-style string API (version=2) automatically
204 204 # converts QStrings to Unicode Python strings. Also, automatically unpacks
205 205 # QVariants to their underlying objects.
206 206 import sip
207 207
208 208 if version is not None:
209 209 sip.setapi('QString', version)
210 210 sip.setapi('QVariant', version)
211 211
212 212 from PyQt4 import QtGui, QtCore, QtSvg
213 213
214 214 if not check_version(QtCore.PYQT_VERSION_STR, '4.7'):
215 215 raise ImportError("IPython requires PyQt4 >= 4.7, found %s" %
216 216 QtCore.PYQT_VERSION_STR)
217 217
218 218 # Alias PyQt-specific functions for PySide compatibility.
219 219 QtCore.Signal = QtCore.pyqtSignal
220 220 QtCore.Slot = QtCore.pyqtSlot
221 221
222 222 # query for the API version (in case version == None)
223 223 version = sip.getapi('QString')
224 224 api = QT_API_PYQTv1 if version == 1 else QT_API_PYQT
225 225 return QtCore, QtGui, QtSvg, api
226 226
227 227
228 228 def import_pyqt5():
229 229 """
230 230 Import PyQt5
231 231
232 ImportErrors rasied within this function are non-recoverable
232 ImportErrors raised within this function are non-recoverable
233 233 """
234 234
235 235 from PyQt5 import QtCore, QtSvg, QtWidgets, QtGui
236 236
237 237 # Alias PyQt-specific functions for PySide compatibility.
238 238 QtCore.Signal = QtCore.pyqtSignal
239 239 QtCore.Slot = QtCore.pyqtSlot
240 240
241 241 # Join QtGui and QtWidgets for Qt4 compatibility.
242 242 QtGuiCompat = types.ModuleType('QtGuiCompat')
243 243 QtGuiCompat.__dict__.update(QtGui.__dict__)
244 244 QtGuiCompat.__dict__.update(QtWidgets.__dict__)
245 245
246 246 api = QT_API_PYQT5
247 247 return QtCore, QtGuiCompat, QtSvg, api
248 248
249 249
250 250 def import_pyqt6():
251 251 """
252 252 Import PyQt6
253 253
254 ImportErrors rasied within this function are non-recoverable
254 ImportErrors raised within this function are non-recoverable
255 255 """
256 256
257 257 from PyQt6 import QtCore, QtSvg, QtWidgets, QtGui
258 258
259 259 # Alias PyQt-specific functions for PySide compatibility.
260 260 QtCore.Signal = QtCore.pyqtSignal
261 261 QtCore.Slot = QtCore.pyqtSlot
262 262
263 263 # Join QtGui and QtWidgets for Qt4 compatibility.
264 264 QtGuiCompat = types.ModuleType("QtGuiCompat")
265 265 QtGuiCompat.__dict__.update(QtGui.__dict__)
266 266 QtGuiCompat.__dict__.update(QtWidgets.__dict__)
267 267
268 268 api = QT_API_PYQT6
269 269 return QtCore, QtGuiCompat, QtSvg, api
270 270
271 271
272 272 def import_pyside():
273 273 """
274 274 Import PySide
275 275
276 276 ImportErrors raised within this function are non-recoverable
277 277 """
278 278 from PySide import QtGui, QtCore, QtSvg
279 279 return QtCore, QtGui, QtSvg, QT_API_PYSIDE
280 280
281 281 def import_pyside2():
282 282 """
283 283 Import PySide2
284 284
285 285 ImportErrors raised within this function are non-recoverable
286 286 """
287 287 from PySide2 import QtGui, QtCore, QtSvg, QtWidgets, QtPrintSupport
288 288
289 289 # Join QtGui and QtWidgets for Qt4 compatibility.
290 290 QtGuiCompat = types.ModuleType('QtGuiCompat')
291 291 QtGuiCompat.__dict__.update(QtGui.__dict__)
292 292 QtGuiCompat.__dict__.update(QtWidgets.__dict__)
293 293 QtGuiCompat.__dict__.update(QtPrintSupport.__dict__)
294 294
295 295 return QtCore, QtGuiCompat, QtSvg, QT_API_PYSIDE2
296 296
297 297
298 298 def import_pyside6():
299 299 """
300 300 Import PySide6
301 301
302 302 ImportErrors raised within this function are non-recoverable
303 303 """
304 304 from PySide6 import QtGui, QtCore, QtSvg, QtWidgets, QtPrintSupport
305 305
306 306 # Join QtGui and QtWidgets for Qt4 compatibility.
307 307 QtGuiCompat = types.ModuleType("QtGuiCompat")
308 308 QtGuiCompat.__dict__.update(QtGui.__dict__)
309 309 QtGuiCompat.__dict__.update(QtWidgets.__dict__)
310 310 QtGuiCompat.__dict__.update(QtPrintSupport.__dict__)
311 311
312 312 return QtCore, QtGuiCompat, QtSvg, QT_API_PYSIDE6
313 313
314 314
315 315 def load_qt(api_options):
316 316 """
317 317 Attempt to import Qt, given a preference list
318 318 of permissible bindings
319 319
320 320 It is safe to call this function multiple times.
321 321
322 322 Parameters
323 323 ----------
324 324 api_options: List of strings
325 325 The order of APIs to try. Valid items are 'pyside', 'pyside2',
326 326 'pyqt', 'pyqt5', 'pyqtv1' and 'pyqtdefault'
327 327
328 328 Returns
329 329 -------
330 330
331 331 A tuple of QtCore, QtGui, QtSvg, QT_API
332 332 The first three are the Qt modules. The last is the
333 333 string indicating which module was loaded.
334 334
335 335 Raises
336 336 ------
337 337 ImportError, if it isn't possible to import any requested
338 338 bindings (either because they aren't installed, or because
339 339 an incompatible library has already been installed)
340 340 """
341 341 loaders = {
342 342 # Qt6
343 343 QT_API_PYQT6: import_pyqt6,
344 344 QT_API_PYSIDE6: import_pyside6,
345 345 # Qt5
346 346 QT_API_PYQT5: import_pyqt5,
347 347 QT_API_PYSIDE2: import_pyside2,
348 348 # Qt4
349 349 QT_API_PYSIDE: import_pyside,
350 350 QT_API_PYQT: import_pyqt4,
351 351 QT_API_PYQTv1: partial(import_pyqt4, version=1),
352 352 # default
353 353 QT_API_PYQT_DEFAULT: import_pyqt6,
354 354 }
355 355
356 356 for api in api_options:
357 357
358 358 if api not in loaders:
359 359 raise RuntimeError(
360 360 "Invalid Qt API %r, valid values are: %s" %
361 361 (api, ", ".join(["%r" % k for k in loaders.keys()])))
362 362
363 363 if not can_import(api):
364 364 continue
365 365
366 366 # cannot safely recover from an ImportError raised during this call
367 367 result = loaders[api]()
368 368 api = result[-1] # changed if api = QT_API_PYQT_DEFAULT
369 369 commit_api(api)
370 370 return result
371 371 else:
372 372 raise ImportError("""
373 373 Could not load requested Qt binding. Please ensure that
374 374 PyQt4 >= 4.7, PyQt5, PySide >= 1.0.3 or PySide2 is available,
375 375 and only one is imported per session.
376 376
377 377 Currently-imported Qt library: %r
378 378 PyQt4 available (requires QtCore, QtGui, QtSvg): %s
379 379 PyQt5 available (requires QtCore, QtGui, QtSvg, QtWidgets): %s
380 380 PySide >= 1.0.3 installed: %s
381 381 PySide2 installed: %s
382 382 Tried to load: %r
383 383 """ % (loaded_api(),
384 384 has_binding(QT_API_PYQT),
385 385 has_binding(QT_API_PYQT5),
386 386 has_binding(QT_API_PYSIDE),
387 387 has_binding(QT_API_PYSIDE2),
388 388 api_options))
389 389
390 390
391 391 def enum_factory(QT_API, QtCore):
392 392 """Construct an enum helper to account for PyQt5 <-> PyQt6 changes."""
393 393
394 394 @lru_cache(None)
395 395 def _enum(name):
396 396 # foo.bar.Enum.Entry (PyQt6) <=> foo.bar.Entry (non-PyQt6).
397 397 return operator.attrgetter(
398 398 name if QT_API == QT_API_PYQT6 else name.rpartition(".")[0]
399 399 )(sys.modules[QtCore.__package__])
400 400
401 401 return _enum
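Together, load_qt and enum_factory give callers a binding-agnostic entry point. A hedged usage sketch, assuming the file above is importable as IPython.external.qt_loaders and that at least one requested binding is installed:

from IPython.external.qt_loaders import load_qt, enum_factory

# Try the newest bindings first; the first importable one wins and the others
# are blocked for the rest of the session by commit_api().
QtCore, QtGui, QtSvg, QT_API = load_qt(["pyqt6", "pyside6", "pyqt5", "pyside2"])

# Resolve enum members uniformly: PyQt6 nests them (Qt.AlignmentFlag.AlignCenter),
# while older bindings expose them one level up (Qt.AlignCenter).
_enum = enum_factory(QT_API, QtCore)
align_center = _enum("QtCore.Qt.AlignmentFlag").AlignCenter
print(QT_API, align_center)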
@@ -1,491 +1,491 b''
1 1 # -*- coding: utf-8 -*-
2 2 """Manage background (threaded) jobs conveniently from an interactive shell.
3 3
4 4 This module provides a BackgroundJobManager class. This is the main class
5 5 meant for public usage, it implements an object which can create and manage
6 6 new background jobs.
7 7
8 8 It also provides the actual job classes managed by these BackgroundJobManager
9 9 objects, see their docstrings below.
10 10
11 11
12 12 This system was inspired by discussions with B. Granger and the
13 13 BackgroundCommand class described in the book Python Scripting for
14 14 Computational Science, by H. P. Langtangen:
15 15
16 16 http://folk.uio.no/hpl/scripting
17 17
18 18 (although ultimately no code from this text was used, as IPython's system is a
19 19 separate implementation).
20 20
21 21 An example notebook is provided in our documentation illustrating interactive
22 22 use of the system.
23 23 """
24 24
25 25 #*****************************************************************************
26 26 # Copyright (C) 2005-2006 Fernando Perez <fperez@colorado.edu>
27 27 #
28 28 # Distributed under the terms of the BSD License. The full license is in
29 29 # the file COPYING, distributed as part of this software.
30 30 #*****************************************************************************
31 31
32 32 # Code begins
33 33 import sys
34 34 import threading
35 35
36 36 from IPython import get_ipython
37 37 from IPython.core.ultratb import AutoFormattedTB
38 38 from logging import error, debug
39 39
40 40
41 41 class BackgroundJobManager(object):
42 42 """Class to manage a pool of backgrounded threaded jobs.
43 43
44 44 Below, we assume that 'jobs' is a BackgroundJobManager instance.
45 45
46 46 Usage summary (see the method docstrings for details):
47 47
48 48 jobs.new(...) -> start a new job
49 49
50 50 jobs() or jobs.status() -> print status summary of all jobs
51 51
52 52 jobs[N] -> returns job number N.
53 53
54 54 foo = jobs[N].result -> assign to variable foo the result of job N
55 55
56 56 jobs[N].traceback() -> print the traceback of dead job N
57 57
58 58 jobs.remove(N) -> remove (finished) job N
59 59
60 60 jobs.flush() -> remove all finished jobs
61 61
62 62 As a convenience feature, BackgroundJobManager instances provide the
63 63 utility result and traceback methods which retrieve the corresponding
64 64 information from the jobs list:
65 65
66 66 jobs.result(N) <--> jobs[N].result
67 67 jobs.traceback(N) <--> jobs[N].traceback()
68 68
69 69 While this appears minor, it allows you to use tab completion
70 70 interactively on the job manager instance.
71 71 """
72 72
73 73 def __init__(self):
74 74 # Lists for job management, accessed via a property to ensure they're
75 75 # up to date.x
76 76 self._running = []
77 77 self._completed = []
78 78 self._dead = []
79 79 # A dict of all jobs, so users can easily access any of them
80 80 self.all = {}
81 81 # For reporting
82 82 self._comp_report = []
83 83 self._dead_report = []
84 84 # Store status codes locally for fast lookups
85 85 self._s_created = BackgroundJobBase.stat_created_c
86 86 self._s_running = BackgroundJobBase.stat_running_c
87 87 self._s_completed = BackgroundJobBase.stat_completed_c
88 88 self._s_dead = BackgroundJobBase.stat_dead_c
89 89 self._current_job_id = 0
90 90
91 91 @property
92 92 def running(self):
93 93 self._update_status()
94 94 return self._running
95 95
96 96 @property
97 97 def dead(self):
98 98 self._update_status()
99 99 return self._dead
100 100
101 101 @property
102 102 def completed(self):
103 103 self._update_status()
104 104 return self._completed
105 105
106 106 def new(self, func_or_exp, *args, **kwargs):
107 107 """Add a new background job and start it in a separate thread.
108 108
109 109 There are two types of jobs which can be created:
110 110
111 111 1. Jobs based on expressions which can be passed to an eval() call.
112 112 The expression must be given as a string. For example:
113 113
114 114 job_manager.new('myfunc(x,y,z=1)'[,glob[,loc]])
115 115
116 116 The given expression is passed to eval(), along with the optional
117 117 global/local dicts provided. If no dicts are given, they are
118 118 extracted automatically from the caller's frame.
119 119
120 120 A Python statement is NOT a valid eval() expression. Basically, you
121 121 can only use as an eval() argument something which can go on the right
122 122 of an '=' sign and be assigned to a variable.
123 123
124 124 For example, "print 'hello'" is not valid, but '2+3' is.
125 125
126 126 2. Jobs given a function object, optionally passing additional
127 127 positional arguments:
128 128
129 129 job_manager.new(myfunc, x, y)
130 130
131 131 The function is called with the given arguments.
132 132
133 133 If you need to pass keyword arguments to your function, you must
134 134 supply them as a dict named kw:
135 135
136 136 job_manager.new(myfunc, x, y, kw=dict(z=1))
137 137
138 The reason for this assymmetry is that the new() method needs to
138 The reason for this asymmetry is that the new() method needs to
139 139 maintain access to its own keywords, and this prevents name collisions
140 140 between arguments to new() and arguments to your own functions.
141 141
142 142 In both cases, the result is stored in the job.result field of the
143 143 background job object.
144 144
145 145 You can set `daemon` attribute of the thread by giving the keyword
146 146 argument `daemon`.
147 147
148 148 Notes and caveats:
149 149
150 150 1. All threads running share the same standard output. Thus, if your
151 151 background jobs generate output, it will come out on top of whatever
152 152 you are currently writing. For this reason, background jobs are best
153 153 used with silent functions which simply return their output.
154 154
155 155 2. Threads also all work within the same global namespace, and this
156 156 system does not lock interactive variables. So if you send a job to the
157 157 background which operates on a mutable object for a long time, and
158 158 start modifying that same mutable object interactively (or in another
159 159 backgrounded job), all sorts of bizarre behaviour will occur.
160 160
161 161 3. If a background job is spending a lot of time inside a C extension
162 162 module which does not release the Python Global Interpreter Lock
163 163 (GIL), this will block the IPython prompt. This is simply because the
164 164 Python interpreter can only switch between threads at Python
165 165 bytecodes. While the execution is inside C code, the interpreter must
166 166 simply wait unless the extension module releases the GIL.
167 167
168 168 4. There is no way, due to limitations in the Python threads library,
169 169 to kill a thread once it has started."""
170 170
171 171 if callable(func_or_exp):
172 172 kw = kwargs.get('kw',{})
173 173 job = BackgroundJobFunc(func_or_exp,*args,**kw)
174 174 elif isinstance(func_or_exp, str):
175 175 if not args:
176 176 frame = sys._getframe(1)
177 177 glob, loc = frame.f_globals, frame.f_locals
178 178 elif len(args)==1:
179 179 glob = loc = args[0]
180 180 elif len(args)==2:
181 181 glob,loc = args
182 182 else:
183 183 raise ValueError(
184 184 'Expression jobs take at most 2 args (globals,locals)')
185 185 job = BackgroundJobExpr(func_or_exp, glob, loc)
186 186 else:
187 187 raise TypeError('invalid args for new job')
188 188
189 189 if kwargs.get('daemon', False):
190 190 job.daemon = True
191 191 job.num = self._current_job_id
192 192 self._current_job_id += 1
193 193 self.running.append(job)
194 194 self.all[job.num] = job
195 195 debug('Starting job # %s in a separate thread.' % job.num)
196 196 job.start()
197 197 return job
198 198
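The kw asymmetry described in the docstring above is easiest to see with both calling conventions side by side. A hedged illustration (slow_add, x and y are hypothetical names; the import path matches this module's location under IPython.lib):

from IPython.lib.backgroundjobs import BackgroundJobManager

jobs = BackgroundJobManager()

def slow_add(a, b, scale=1):
    return (a + b) * scale

# 1. Function job: positional args pass straight through, keyword args go in kw=dict(...)
j1 = jobs.new(slow_add, 2, 3, kw=dict(scale=10))

# 2. Expression job: a string handed to eval(), evaluated in the caller's namespace
x, y = 2, 3
j2 = jobs.new("slow_add(x, y)")

j1.join()                      # jobs are threading.Thread instances
j2.join()
print(j1.result, j2.result)    # -> 50 5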
199 199 def __getitem__(self, job_key):
200 200 num = job_key if isinstance(job_key, int) else job_key.num
201 201 return self.all[num]
202 202
203 203 def __call__(self):
204 204 """An alias to self.status(),
205 205
206 206 This allows you to simply call a job manager instance much like the
207 207 Unix `jobs` shell command."""
208 208
209 209 return self.status()
210 210
211 211 def _update_status(self):
212 212 """Update the status of the job lists.
213 213
214 214 This method moves finished jobs to one of two lists:
215 215 - self.completed: jobs which completed successfully
216 216 - self.dead: jobs which finished but died.
217 217
218 218 It also copies those jobs to corresponding _report lists. These lists
219 219 are used to report jobs completed/dead since the last update, and are
220 220 then cleared by the reporting function after each call."""
221 221
222 222 # Status codes
223 223 srun, scomp, sdead = self._s_running, self._s_completed, self._s_dead
224 224 # State lists, use the actual lists b/c the public names are properties
225 225 # that call this very function on access
226 226 running, completed, dead = self._running, self._completed, self._dead
227 227
228 228 # Now, update all state lists
229 229 for num, job in enumerate(running):
230 230 stat = job.stat_code
231 231 if stat == srun:
232 232 continue
233 233 elif stat == scomp:
234 234 completed.append(job)
235 235 self._comp_report.append(job)
236 236 running[num] = False
237 237 elif stat == sdead:
238 238 dead.append(job)
239 239 self._dead_report.append(job)
240 240 running[num] = False
241 241 # Remove dead/completed jobs from running list
242 242 running[:] = filter(None, running)
243 243
244 244 def _group_report(self,group,name):
245 245 """Report summary for a given job group.
246 246
247 247 Return True if the group had any elements."""
248 248
249 249 if group:
250 250 print('%s jobs:' % name)
251 251 for job in group:
252 252 print('%s : %s' % (job.num,job))
253 253 print()
254 254 return True
255 255
256 256 def _group_flush(self,group,name):
257 257 """Flush a given job group
258 258
259 259 Return True if the group had any elements."""
260 260
261 261 njobs = len(group)
262 262 if njobs:
263 263 plural = {1:''}.setdefault(njobs,'s')
264 264 print('Flushing %s %s job%s.' % (njobs,name,plural))
265 265 group[:] = []
266 266 return True
267 267
268 268 def _status_new(self):
269 269 """Print the status of newly finished jobs.
270 270
271 271 Return True if any new jobs are reported.
272 272
273 273 This call resets its own state every time, so it only reports jobs
274 274 which have finished since the last time it was called."""
275 275
276 276 self._update_status()
277 277 new_comp = self._group_report(self._comp_report, 'Completed')
278 278 new_dead = self._group_report(self._dead_report,
279 279 'Dead, call jobs.traceback() for details')
280 280 self._comp_report[:] = []
281 281 self._dead_report[:] = []
282 282 return new_comp or new_dead
283 283
284 284 def status(self,verbose=0):
285 285 """Print a status of all jobs currently being managed."""
286 286
287 287 self._update_status()
288 288 self._group_report(self.running,'Running')
289 289 self._group_report(self.completed,'Completed')
290 290 self._group_report(self.dead,'Dead')
291 291 # Also flush the report queues
292 292 self._comp_report[:] = []
293 293 self._dead_report[:] = []
294 294
295 295 def remove(self,num):
296 296 """Remove a finished (completed or dead) job."""
297 297
298 298 try:
299 299 job = self.all[num]
300 300 except KeyError:
301 301 error('Job #%s not found' % num)
302 302 else:
303 303 stat_code = job.stat_code
304 304 if stat_code == self._s_running:
305 305 error('Job #%s is still running, it can not be removed.' % num)
306 306 return
307 307 elif stat_code == self._s_completed:
308 308 self.completed.remove(job)
309 309 elif stat_code == self._s_dead:
310 310 self.dead.remove(job)
311 311
312 312 def flush(self):
313 313 """Flush all finished jobs (completed and dead) from lists.
314 314
315 315 Running jobs are never flushed.
316 316
317 317 It first calls _status_new(), to update info. If any jobs have
318 318 completed since the last _status_new() call, the flush operation
319 319 aborts."""
320 320
321 321 # Remove the finished jobs from the master dict
322 322 alljobs = self.all
323 323 for job in self.completed+self.dead:
324 324 del(alljobs[job.num])
325 325
326 326 # Now flush these lists completely
327 327 fl_comp = self._group_flush(self.completed, 'Completed')
328 328 fl_dead = self._group_flush(self.dead, 'Dead')
329 329 if not (fl_comp or fl_dead):
330 330 print('No jobs to flush.')
331 331
332 332 def result(self,num):
333 333 """result(N) -> return the result of job N."""
334 334 try:
335 335 return self.all[num].result
336 336 except KeyError:
337 337 error('Job #%s not found' % num)
338 338
339 339 def _traceback(self, job):
340 340 num = job if isinstance(job, int) else job.num
341 341 try:
342 342 self.all[num].traceback()
343 343 except KeyError:
344 344 error('Job #%s not found' % num)
345 345
346 346 def traceback(self, job=None):
347 347 if job is None:
348 348 self._update_status()
349 349 for deadjob in self.dead:
350 350 print("Traceback for: %r" % deadjob)
351 351 self._traceback(deadjob)
352 352 print()
353 353 else:
354 354 self._traceback(job)
355 355
356 356
357 357 class BackgroundJobBase(threading.Thread):
358 358 """Base class to build BackgroundJob classes.
359 359
360 360 The derived classes must implement:
361 361
362 362 - Their own __init__, since the one here raises NotImplementedError. The
363 363 derived constructor must call self._init() at the end, to provide common
364 364 initialization.
365 365
366 366 - A strform attribute used in calls to __str__.
367 367
368 368 - A call() method, which will make the actual execution call and must
369 369 return a value to be held in the 'result' field of the job object.
370 370 """
371 371
372 372 # Class constants for status, in string and as numerical codes (when
373 373 # updating jobs lists, we don't want to do string comparisons). This will
374 374 # be done at every user prompt, so it has to be as fast as possible
375 375 stat_created = 'Created'; stat_created_c = 0
376 376 stat_running = 'Running'; stat_running_c = 1
377 377 stat_completed = 'Completed'; stat_completed_c = 2
378 378 stat_dead = 'Dead (Exception), call jobs.traceback() for details'
379 379 stat_dead_c = -1
380 380
381 381 def __init__(self):
382 382 """Must be implemented in subclasses.
383 383
384 384 Subclasses must call :meth:`_init` for standard initialisation.
385 385 """
386 386 raise NotImplementedError("This class can not be instantiated directly.")
387 387
388 388 def _init(self):
389 389 """Common initialization for all BackgroundJob objects"""
390 390
391 391 for attr in ['call','strform']:
392 392 assert hasattr(self,attr), "Missing attribute <%s>" % attr
393 393
394 394 # The num tag can be set by an external job manager
395 395 self.num = None
396 396
397 397 self.status = BackgroundJobBase.stat_created
398 398 self.stat_code = BackgroundJobBase.stat_created_c
399 399 self.finished = False
400 400 self.result = '<BackgroundJob has not completed>'
401 401
402 402 # reuse the ipython traceback handler if we can get to it, otherwise
403 403 # make a new one
404 404 try:
405 405 make_tb = get_ipython().InteractiveTB.text
406 406 except:
407 407 make_tb = AutoFormattedTB(mode = 'Context',
408 408 color_scheme='NoColor',
409 409 tb_offset = 1).text
410 410 # Note that the actual API for text() requires the three args to be
411 411 # passed in, so we wrap it in a simple lambda.
412 412 self._make_tb = lambda : make_tb(None, None, None)
413 413
414 414 # Hold a formatted traceback if one is generated.
415 415 self._tb = None
416 416
417 417 threading.Thread.__init__(self)
418 418
419 419 def __str__(self):
420 420 return self.strform
421 421
422 422 def __repr__(self):
423 423 return '<BackgroundJob #%d: %s>' % (self.num, self.strform)
424 424
425 425 def traceback(self):
426 426 print(self._tb)
427 427
428 428 def run(self):
429 429 try:
430 430 self.status = BackgroundJobBase.stat_running
431 431 self.stat_code = BackgroundJobBase.stat_running_c
432 432 self.result = self.call()
433 433 except:
434 434 self.status = BackgroundJobBase.stat_dead
435 435 self.stat_code = BackgroundJobBase.stat_dead_c
436 436 self.finished = None
437 437 self.result = ('<BackgroundJob died, call jobs.traceback() for details>')
438 438 self._tb = self._make_tb()
439 439 else:
440 440 self.status = BackgroundJobBase.stat_completed
441 441 self.stat_code = BackgroundJobBase.stat_completed_c
442 442 self.finished = True
443 443
444 444
445 445 class BackgroundJobExpr(BackgroundJobBase):
446 446 """Evaluate an expression as a background job (uses a separate thread)."""
447 447
448 448 def __init__(self, expression, glob=None, loc=None):
449 449 """Create a new job from a string which can be fed to eval().
450 450
451 451 global/locals dicts can be provided, which will be passed to the eval
452 452 call."""
453 453
454 454 # fail immediately if the given expression can't be compiled
455 455 self.code = compile(expression,'<BackgroundJob compilation>','eval')
456 456
457 457 glob = {} if glob is None else glob
458 458 loc = {} if loc is None else loc
459 459 self.expression = self.strform = expression
460 460 self.glob = glob
461 461 self.loc = loc
462 462 self._init()
463 463
464 464 def call(self):
465 465 return eval(self.code,self.glob,self.loc)
466 466
467 467
468 468 class BackgroundJobFunc(BackgroundJobBase):
469 469 """Run a function call as a background job (uses a separate thread)."""
470 470
471 471 def __init__(self, func, *args, **kwargs):
472 472 """Create a new job from a callable object.
473 473
474 474 Any positional arguments and keyword args given to this constructor
475 475 after the initial callable are passed directly to it."""
476 476
477 477 if not callable(func):
478 478 raise TypeError(
479 479 'first argument to BackgroundJobFunc must be callable')
480 480
481 481 self.func = func
482 482 self.args = args
483 483 self.kwargs = kwargs
484 484 # The string form will only include the function passed, because
485 485 # generating string representations of the arguments is a potentially
486 486 # _very_ expensive operation (e.g. with large arrays).
487 487 self.strform = str(func)
488 488 self._init()
489 489
490 490 def call(self):
491 491 return self.func(*self.args, **self.kwargs)
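To round out the class docstring's usage summary, here is a hedged sketch of the manager-level conveniences (status, result, traceback, flush); the worker functions are hypothetical:

import time

from IPython.lib.backgroundjobs import BackgroundJobManager

def work(n):
    time.sleep(0.1)
    return n * n

def broken():
    raise RuntimeError("boom")

jobs = BackgroundJobManager()
ok = jobs.new(work, 7)
bad = jobs.new(broken)

time.sleep(0.5)               # give both threads time to finish
jobs.status()                 # prints the Completed / Dead summaries
print(jobs.result(ok.num))    # -> 49
jobs.traceback(bad)           # formatted traceback of the dead job
jobs.flush()                  # drop all finished jobs from the manager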
@@ -1,672 +1,672 b''
1 1 """Module for interactive demos using IPython.
2 2
3 3 This module implements a few classes for running Python scripts interactively
4 4 in IPython for demonstrations. With very simple markup (a few tags in
5 5 comments), you can control points where the script stops executing and returns
6 6 control to IPython.
7 7
8 8
9 9 Provided classes
10 10 ----------------
11 11
12 12 The classes are (see their docstrings for further details):
13 13
14 14 - Demo: pure python demos
15 15
16 16 - IPythonDemo: demos with input to be processed by IPython as if it had been
17 17 typed interactively (so magics work, as well as any other special syntax you
18 18 may have added via input prefilters).
19 19
20 20 - LineDemo: single-line version of the Demo class. These demos are executed
21 21 one line at a time, and require no markup.
22 22
23 23 - IPythonLineDemo: IPython version of the LineDemo class (the demo is
24 24 executed a line at a time, but processed via IPython).
25 25
26 26 - ClearMixin: mixin to make Demo classes with less visual clutter. It
27 27 declares an empty marquee and a pre_cmd that clears the screen before each
28 28 block (see Subclassing below).
29 29
30 30 - ClearDemo, ClearIPDemo: mixin-enabled versions of the Demo and IPythonDemo
31 31 classes.
32 32
33 33 Inheritance diagram:
34 34
35 35 .. inheritance-diagram:: IPython.lib.demo
36 36 :parts: 3
37 37
38 38 Subclassing
39 39 -----------
40 40
41 41 The classes here all include a few methods meant to make customization by
42 42 subclassing more convenient. Their docstrings below have some more details:
43 43
44 44 - highlight(): format every block and optionally highlight comments and
45 45 docstring content.
46 46
47 47 - marquee(): generates a marquee to provide visible on-screen markers at each
48 48 block start and end.
49 49
50 50 - pre_cmd(): run right before the execution of each block.
51 51
52 52 - post_cmd(): run right after the execution of each block. If the block
53 53 raises an exception, this is NOT called.
54 54
55 55
56 56 Operation
57 57 ---------
58 58
59 59 The file is run in its own empty namespace (though you can pass it a string of
60 60 arguments as if in a command line environment, and it will see those as
61 61 sys.argv). But at each stop, the global IPython namespace is updated with the
62 62 current internal demo namespace, so you can work interactively with the data
63 63 accumulated so far.
64 64
65 65 By default, each block of code is printed (with syntax highlighting) before
66 66 executing it and you have to confirm execution. This is intended to show the
67 67 code to an audience first so you can discuss it, and only proceed with
68 68 execution once you agree. There are a few tags which allow you to modify this
69 69 behavior.
70 70
71 71 The supported tags are:
72 72
73 73 # <demo> stop
74 74
75 75 Defines block boundaries, the points where IPython stops execution of the
76 76 file and returns to the interactive prompt.
77 77
78 78 You can optionally mark the stop tag with extra dashes before and after the
79 79 word 'stop', to help visually distinguish the blocks in a text editor:
80 80
81 81 # <demo> --- stop ---
82 82
83 83
84 84 # <demo> silent
85 85
86 86 Make a block execute silently (and hence automatically). Typically used in
87 87 cases where you have some boilerplate or initialization code which you need
88 88 executed but do not want to be seen in the demo.
89 89
90 90 # <demo> auto
91 91
92 92 Make a block execute automatically, but still being printed. Useful for
93 93 simple code which does not warrant discussion, since it avoids the extra
94 94 manual confirmation.
95 95
96 96 # <demo> auto_all
97 97
98 98 This tag can _only_ be in the first block, and if given it overrides the
99 99 individual auto tags to make the whole demo fully automatic (no block asks
100 100 for confirmation). It can also be given at creation time (or the attribute
101 101 set later) to override what's in the file.
102 102
103 103 While _any_ python file can be run as a Demo instance, if there are no stop
104 104 tags the whole file will run in a single block (no different than calling
105 105 first %pycat and then %run). The minimal markup to make this useful is to
106 106 place a set of stop tags; the other tags are only there to let you fine-tune
107 107 the execution.
108 108
109 109 This is probably best explained with the simple example file below. You can
110 110 copy this into a file named ex_demo.py, and try running it via::
111 111
112 112 from IPython.lib.demo import Demo
113 113 d = Demo('ex_demo.py')
114 114 d()
115 115
116 116 Each time you call the demo object, it runs the next block. The demo object
117 117 has a few useful methods for navigation, like again(), edit(), jump(), seek()
118 118 and back(). It can be reset for a new run via reset() or reloaded from disk
119 119 (in case you've edited the source) via reload(). See their docstrings below.
120 120
121 121 Note: To make this simpler to explore, a file called "demo-exercizer.py" has
122 122 been added to the "docs/examples/core" directory. Just cd to this directory in
123 123 an IPython session, and type::
124 124
125 125 %run demo-exercizer.py
126 126
127 127 and then follow the directions.
128 128
129 129 Example
130 130 -------
131 131
132 132 The following is a very simple example of a valid demo file.
133 133
134 134 ::
135 135
136 136 #################### EXAMPLE DEMO <ex_demo.py> ###############################
137 137 '''A simple interactive demo to illustrate the use of IPython's Demo class.'''
138 138
139 139 print 'Hello, welcome to an interactive IPython demo.'
140 140
141 141 # The mark below defines a block boundary, which is a point where IPython will
142 142 # stop execution and return to the interactive prompt. The dashes are actually
143 143 # optional and used only as a visual aid to clearly separate blocks while
144 144 # editing the demo code.
145 145 # <demo> stop
146 146
147 147 x = 1
148 148 y = 2
149 149
150 150 # <demo> stop
151 151
152 152 # the mark below marks this block as silent
153 153 # <demo> silent
154 154
155 155 print 'This is a silent block, which gets executed but not printed.'
156 156
157 157 # <demo> stop
158 158 # <demo> auto
159 159 print 'This is an automatic block.'
160 160 print 'It is executed without asking for confirmation, but printed.'
161 161 z = x+y
162 162
163 163 print 'z=',x
164 164
165 165 # <demo> stop
166 166 # This is just another normal block.
167 167 print 'z is now:', z
168 168
169 169 print 'bye!'
170 170 ################### END EXAMPLE DEMO <ex_demo.py> ############################
171 171 """
172 172
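As a complement to the ex_demo.py walk-through above, a hedged sketch of the navigation methods the docstring mentions (again, seek, reset), assuming the ex_demo.py from the example exists in the current directory:

from IPython.lib.demo import Demo

d = Demo("ex_demo.py")
d()           # show and run the first block, then stop at the next '# <demo> stop'
d()           # run the following block
d.again()     # step back one block and re-execute it
d.seek(0)     # move the seek pointer back to the first block
d.reset()     # clear the demo namespace for a fresh run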
173 173
174 174 #*****************************************************************************
175 175 # Copyright (C) 2005-2006 Fernando Perez. <Fernando.Perez@colorado.edu>
176 176 #
177 177 # Distributed under the terms of the BSD License. The full license is in
178 178 # the file COPYING, distributed as part of this software.
179 179 #
180 180 #*****************************************************************************
181 181
182 182 import os
183 183 import re
184 184 import shlex
185 185 import sys
186 186 import pygments
187 187 from pathlib import Path
188 188
189 189 from IPython.utils.text import marquee
190 190 from IPython.utils import openpy
191 191 from IPython.utils import py3compat
192 192 __all__ = ['Demo','IPythonDemo','LineDemo','IPythonLineDemo','DemoError']
193 193
194 194 class DemoError(Exception): pass
195 195
196 196 def re_mark(mark):
197 197 return re.compile(r'^\s*#\s+<demo>\s+%s\s*$' % mark,re.MULTILINE)
198 198
199 199 class Demo(object):
200 200
201 201 re_stop = re_mark(r'-*\s?stop\s?-*')
202 202 re_silent = re_mark('silent')
203 203 re_auto = re_mark('auto')
204 204 re_auto_all = re_mark('auto_all')
205 205
206 206 def __init__(self,src,title='',arg_str='',auto_all=None, format_rst=False,
207 207 formatter='terminal', style='default'):
208 208 """Make a new demo object. To run the demo, simply call the object.
209 209
210 210 See the module docstring for full details and an example (you can use
211 211 IPython.Demo? in IPython to see it).
212 212
213 213 Inputs:
214 214
215 215 - src is either a file, or file-like object, or a
216 216 string that can be resolved to a filename.
217 217
218 218 Optional inputs:
219 219
220 220 - title: a string to use as the demo name. Of most use when the demo
221 221 you are making comes from an object that has no filename, or if you
222 222 want an alternate denotation distinct from the filename.
223 223
224 224 - arg_str(''): a string of arguments, internally converted to a list
225 225 just like sys.argv, so the demo script can see a similar
226 226 environment.
227 227
228 228 - auto_all(None): global flag to run all blocks automatically without
229 229 confirmation. This attribute overrides the block-level tags and
230 230 applies to the whole demo. It is an attribute of the object, and
231 231 can be changed at runtime simply by reassigning it to a boolean
232 232 value.
233 233
234 234 - format_rst(False): a bool to enable comments and doc strings
235 235 formatting with pygments rst lexer
236 236
237 237 - formatter('terminal'): a string of pygments formatter name to be
238 238 used. Useful values for terminals: terminal, terminal256,
239 239 terminal16m
240 240
241 241 - style('default'): a string of pygments style name to be used.
242 242 """
243 243 if hasattr(src, "read"):
244 244 # It seems to be a file or a file-like object
245 245 self.fname = "from a file-like object"
246 246 if title == '':
247 247 self.title = "from a file-like object"
248 248 else:
249 249 self.title = title
250 250 else:
251 251 # Assume it's a string or something that can be converted to one
252 252 self.fname = src
253 253 if title == '':
254 254 (filepath, filename) = os.path.split(src)
255 255 self.title = filename
256 256 else:
257 257 self.title = title
258 258 self.sys_argv = [src] + shlex.split(arg_str)
259 259 self.auto_all = auto_all
260 260 self.src = src
261 261
262 262 try:
263 263 ip = get_ipython() # this is in builtins whenever IPython is running
264 264 self.inside_ipython = True
265 265 except NameError:
266 266 self.inside_ipython = False
267 267
268 268 if self.inside_ipython:
269 269 # get a few things from ipython. While it's a bit ugly design-wise,
270 270 # it ensures that things like color scheme and the like are always in
271 271 # sync with the ipython mode being used. This class is only meant to
272 272 # be used inside ipython anyways, so it's OK.
273 273 self.ip_ns = ip.user_ns
274 274 self.ip_colorize = ip.pycolorize
275 275 self.ip_showtb = ip.showtraceback
276 276 self.ip_run_cell = ip.run_cell
277 277 self.shell = ip
278 278
279 279 self.formatter = pygments.formatters.get_formatter_by_name(formatter,
280 280 style=style)
281 281 self.python_lexer = pygments.lexers.get_lexer_by_name("py3")
282 282 self.format_rst = format_rst
283 283 if format_rst:
284 284 self.rst_lexer = pygments.lexers.get_lexer_by_name("rst")
285 285
286 286 # load user data and initialize data structures
287 287 self.reload()
288 288
289 289 def fload(self):
290 290 """Load file object."""
291 291 # read data and parse into blocks
292 292 if hasattr(self, 'fobj') and self.fobj is not None:
293 293 self.fobj.close()
294 294 if hasattr(self.src, "read"):
295 295 # It seems to be a file or a file-like object
296 296 self.fobj = self.src
297 297 else:
298 298 # Assume it's a string or something that can be converted to one
299 299 self.fobj = openpy.open(self.fname)
300 300
301 301 def reload(self):
302 302 """Reload source from disk and initialize state."""
303 303 self.fload()
304 304
305 305 self.src = "".join(openpy.strip_encoding_cookie(self.fobj))
306 306 src_b = [b.strip() for b in self.re_stop.split(self.src) if b]
307 307 self._silent = [bool(self.re_silent.findall(b)) for b in src_b]
308 308 self._auto = [bool(self.re_auto.findall(b)) for b in src_b]
309 309
310 310 # if auto_all is not given (def. None), we read it from the file
311 311 if self.auto_all is None:
312 312 self.auto_all = bool(self.re_auto_all.findall(src_b[0]))
313 313 else:
314 314 self.auto_all = bool(self.auto_all)
315 315
316 316 # Clean the sources from all markup so it doesn't get displayed when
317 317 # running the demo
318 318 src_blocks = []
319 319 auto_strip = lambda s: self.re_auto.sub('',s)
320 320 for i,b in enumerate(src_b):
321 321 if self._auto[i]:
322 322 src_blocks.append(auto_strip(b))
323 323 else:
324 324 src_blocks.append(b)
325 325 # remove the auto_all marker
326 326 src_blocks[0] = self.re_auto_all.sub('',src_blocks[0])
327 327
328 328 self.nblocks = len(src_blocks)
329 329 self.src_blocks = src_blocks
330 330
331 331 # also build syntax-highlighted source
332 332 self.src_blocks_colored = list(map(self.highlight,self.src_blocks))
333 333
334 334 # ensure clean namespace and seek offset
335 335 self.reset()
336 336
337 337 def reset(self):
338 338 """Reset the namespace and seek pointer to restart the demo"""
339 339 self.user_ns = {}
340 340 self.finished = False
341 341 self.block_index = 0
342 342
343 343 def _validate_index(self,index):
344 344 if index<0 or index>=self.nblocks:
345 345 raise ValueError('invalid block index %s' % index)
346 346
347 347 def _get_index(self,index):
348 348 """Get the current block index, validating and checking status.
349 349
350 350 Returns None if the demo is finished"""
351 351
352 352 if index is None:
353 353 if self.finished:
354 354 print('Demo finished. Use <demo_name>.reset() if you want to rerun it.')
355 355 return None
356 356 index = self.block_index
357 357 else:
358 358 self._validate_index(index)
359 359 return index
360 360
361 361 def seek(self,index):
362 362 """Move the current seek pointer to the given block.
363 363
364 364 You can use negative indices to seek from the end, with identical
365 365 semantics to those of Python lists."""
366 366 if index<0:
367 367 index = self.nblocks + index
368 368 self._validate_index(index)
369 369 self.block_index = index
370 370 self.finished = False
371 371
372 372 def back(self,num=1):
373 373 """Move the seek pointer back num blocks (default is 1)."""
374 374 self.seek(self.block_index-num)
375 375
376 376 def jump(self,num=1):
377 377 """Jump a given number of blocks relative to the current one.
378 378
379 379 The offset can be positive or negative, defaults to 1."""
380 380 self.seek(self.block_index+num)
381 381
382 382 def again(self):
383 383 """Move the seek pointer back one block and re-execute."""
384 384 self.back(1)
385 385 self()
386 386
387 387 def edit(self,index=None):
388 388 """Edit a block.
389 389
390 390 If no number is given, use the last block executed.
391 391
392 392 This edits the in-memory copy of the demo; it does NOT modify the
393 393 original source file. If you want to do that, simply open the file in
394 394 an editor and use reload() when you make changes to the file. This
395 395 method is meant to let you change a block during a demonstration for
396 396 explanatory purposes, without damaging your original script."""
397 397
398 398 index = self._get_index(index)
399 399 if index is None:
400 400 return
401 401 # decrease the index by one (unless we're at the very beginning), so
402 402 # that the default demo.edit() call opens up the block we've last run
403 403 if index>0:
404 404 index -= 1
405 405
406 406 filename = self.shell.mktempfile(self.src_blocks[index])
407 407 self.shell.hooks.editor(filename, 1)
408 408 with open(Path(filename), "r") as f:
409 409 new_block = f.read()
410 410 # update the source and colored block
411 411 self.src_blocks[index] = new_block
412 412 self.src_blocks_colored[index] = self.highlight(new_block)
413 413 self.block_index = index
414 414 # call to run with the newly edited index
415 415 self()
416 416
417 417 def show(self,index=None):
418 418 """Show a single block on screen"""
419 419
420 420 index = self._get_index(index)
421 421 if index is None:
422 422 return
423 423
424 424 print(self.marquee('<%s> block # %s (%s remaining)' %
425 425 (self.title,index,self.nblocks-index-1)))
426 426 print(self.src_blocks_colored[index])
427 427 sys.stdout.flush()
428 428
429 429 def show_all(self):
430 430 """Show entire demo on screen, block by block"""
431 431
432 432 fname = self.title
433 433 title = self.title
434 434 nblocks = self.nblocks
435 435 silent = self._silent
436 436 marquee = self.marquee
437 437 for index,block in enumerate(self.src_blocks_colored):
438 438 if silent[index]:
439 439 print(marquee('<%s> SILENT block # %s (%s remaining)' %
440 440 (title,index,nblocks-index-1)))
441 441 else:
442 442 print(marquee('<%s> block # %s (%s remaining)' %
443 443 (title,index,nblocks-index-1)))
444 444 print(block, end=' ')
445 445 sys.stdout.flush()
446 446
447 447 def run_cell(self,source):
448 448 """Execute a string with one or more lines of code"""
449 449
450 450 exec(source, self.user_ns)
451 451
452 452 def __call__(self,index=None):
453 453 """run a block of the demo.
454 454
455 455 If index is given, it should be an integer >=1 and <= nblocks. This
456 456 means that the calling convention is one off from typical Python
457 457 lists. The reason for the inconsistency is that the demo always
458 458 prints 'Block n/N', and N is the total, so it would be very odd to use
459 459 zero-indexing here."""
460 460
461 461 index = self._get_index(index)
462 462 if index is None:
463 463 return
464 464 try:
465 465 marquee = self.marquee
466 466 next_block = self.src_blocks[index]
467 467 self.block_index += 1
468 468 if self._silent[index]:
469 469 print(marquee('Executing silent block # %s (%s remaining)' %
470 470 (index,self.nblocks-index-1)))
471 471 else:
472 472 self.pre_cmd()
473 473 self.show(index)
474 474 if self.auto_all or self._auto[index]:
475 475 print(marquee('output:'))
476 476 else:
477 477 print(marquee('Press <q> to quit, <Enter> to execute...'), end=' ')
478 478 ans = py3compat.input().strip()
479 479 if ans:
480 480 print(marquee('Block NOT executed'))
481 481 return
482 482 try:
483 483 save_argv = sys.argv
484 484 sys.argv = self.sys_argv
485 485 self.run_cell(next_block)
486 486 self.post_cmd()
487 487 finally:
488 488 sys.argv = save_argv
489 489
490 490 except:
491 491 if self.inside_ipython:
492 492 self.ip_showtb(filename=self.fname)
493 493 else:
494 494 if self.inside_ipython:
495 495 self.ip_ns.update(self.user_ns)
496 496
497 497 if self.block_index == self.nblocks:
498 498 mq1 = self.marquee('END OF DEMO')
499 499 if mq1:
500 500 # avoid spurious print if empty marquees are used
501 501 print()
502 502 print(mq1)
503 503 print(self.marquee('Use <demo_name>.reset() if you want to rerun it.'))
504 504 self.finished = True
505 505
506 506 # These methods are meant to be overridden by subclasses who may wish to
507 507 # customize the behavior of their demos.
508 508 def marquee(self,txt='',width=78,mark='*'):
509 509 """Return the input string centered in a 'marquee'."""
510 510 return marquee(txt,width,mark)
511 511
512 512 def pre_cmd(self):
513 513 """Method called before executing each block."""
514 514 pass
515 515
516 516 def post_cmd(self):
517 517 """Method called after executing each block."""
518 518 pass
519 519
520 520 def highlight(self, block):
521 521 """Method called on each block to highlight its content"""
522 522 tokens = pygments.lex(block, self.python_lexer)
523 523 if self.format_rst:
524 524 from pygments.token import Token
525 525 toks = []
526 526 for token in tokens:
527 527 if token[0] == Token.String.Doc and len(token[1]) > 6:
528 528 toks += pygments.lex(token[1][:3], self.python_lexer)
529 529 # parse doc string content by rst lexer
530 530 toks += pygments.lex(token[1][3:-3], self.rst_lexer)
531 531 toks += pygments.lex(token[1][-3:], self.python_lexer)
532 532 elif token[0] == Token.Comment.Single:
533 533 toks.append((Token.Comment.Single, token[1][0]))
534 534 # parse comment content by rst lexer
535 # remove the extrat newline added by rst lexer
535 # remove the extra newline added by rst lexer
536 536 toks += list(pygments.lex(token[1][1:], self.rst_lexer))[:-1]
537 537 else:
538 538 toks.append(token)
539 539 tokens = toks
540 540 return pygments.format(tokens, self.formatter)
541 541
542 542
543 543 class IPythonDemo(Demo):
544 544 """Class for interactive demos with IPython's input processing applied.
545 545
546 546 This subclasses Demo, but instead of executing each block by the Python
547 547 interpreter (via exec), it actually calls IPython on it, so that any input
548 548 filters which may be in place are applied to the input block.
549 549
550 550 If you have an interactive environment which exposes special input
551 551 processing, you can use this class instead to write demo scripts which
552 552 operate exactly as if you had typed them interactively. The default Demo
553 553 class requires the input to be valid, pure Python code.
554 554 """
555 555
556 556 def run_cell(self,source):
557 557 """Execute a string with one or more lines of code"""
558 558
559 559 self.shell.run_cell(source)
560 560
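# Hedged usage sketch, added for illustration only (not part of this changeset).
# The docstring above explains that IPythonDemo pushes each block through
# IPython's run_cell() so the usual input transformers apply. The import path
# IPython.lib.demo and the script name "example_demo.py" are assumptions here:
def _example_ipython_demo_usage():  # pragma: no cover - illustrative only
    from IPython.lib.demo import IPythonDemo

    demo = IPythonDemo("example_demo.py")  # hypothetical demo script
    demo()        # show the first block and run it through run_cell()
    demo.back()   # move the seek pointer back one block
    demo.reset()  # rewind so the whole demo can be replayed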
561 561 class LineDemo(Demo):
562 562 """Demo where each line is executed as a separate block.
563 563
564 564 The input script should be valid Python code.
565 565
566 566 This class doesn't require any markup at all, and it's meant for simple
567 567 scripts (with no nesting or any kind of indentation) which consist of
568 568 multiple lines of input to be executed, one at a time, as if they had been
569 569 typed in the interactive prompt.
570 570
571 571 Note: the input cannot have *any* indentation, which means that only
572 572 single lines of input are accepted; not even function definitions are
573 573 valid."""
574 574
575 575 def reload(self):
576 576 """Reload source from disk and initialize state."""
577 577 # read data and parse into blocks
578 578 self.fload()
579 579 lines = self.fobj.readlines()
580 580 src_b = [l for l in lines if l.strip()]
581 581 nblocks = len(src_b)
582 582 self.src = ''.join(lines)
583 583 self._silent = [False]*nblocks
584 584 self._auto = [True]*nblocks
585 585 self.auto_all = True
586 586 self.nblocks = nblocks
587 587 self.src_blocks = src_b
588 588
589 589 # also build syntax-highlighted source
590 590 self.src_blocks_colored = list(map(self.highlight,self.src_blocks))
591 591
592 592 # ensure clean namespace and seek offset
593 593 self.reset()
594 594
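# Hedged sketch, for illustration only (not from this changeset). reload()
# above turns every non-blank line of the source into its own auto-run block,
# so a LineDemo script needs no <demo> markup. The file name "lines_demo.py"
# and its three lines are assumptions made up for the example:
#
#     a = 1
#     b = a + 1
#     print(a, b)
#
def _example_line_demo_usage():  # pragma: no cover - illustrative only
    from IPython.lib.demo import LineDemo

    demo = LineDemo("lines_demo.py")
    while not demo.finished:
        demo()  # each call executes exactly one line as its own block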
595 595
596 596 class IPythonLineDemo(IPythonDemo,LineDemo):
597 597 """Variant of the LineDemo class whose input is processed by IPython."""
598 598 pass
599 599
600 600
601 601 class ClearMixin(object):
602 602 """Use this mixin to make Demo classes with less visual clutter.
603 603
604 604 Demos using this mixin will clear the screen before every block and use
605 605 blank marquees.
606 606
607 607 Note that in order for the methods defined here to actually override those
608 608 of the classes it's mixed with, it must go /first/ in the inheritance
609 609 tree. For example:
610 610
611 611 class ClearIPDemo(ClearMixin,IPythonDemo): pass
612 612
613 613 will provide an IPythonDemo class with the mixin's features.
614 614 """
615 615
616 616 def marquee(self,txt='',width=78,mark='*'):
617 617 """Blank marquee that returns '' no matter what the input."""
618 618 return ''
619 619
620 620 def pre_cmd(self):
621 621 """Method called before executing each block.
622 622
623 623 This one simply clears the screen."""
624 624 from IPython.utils.terminal import _term_clear
625 625 _term_clear()
626 626
627 627 class ClearDemo(ClearMixin,Demo):
628 628 pass
629 629
630 630
631 631 class ClearIPDemo(ClearMixin,IPythonDemo):
632 632 pass
633 633
634 634
635 635 def slide(file_path, noclear=False, format_rst=True, formatter="terminal",
636 636 style="native", auto_all=False, delimiter='...'):
637 637 if noclear:
638 638 demo_class = Demo
639 639 else:
640 640 demo_class = ClearDemo
641 641 demo = demo_class(file_path, format_rst=format_rst, formatter=formatter,
642 642 style=style, auto_all=auto_all)
643 643 while not demo.finished:
644 644 demo()
645 645 try:
646 646 py3compat.input('\n' + delimiter)
647 647 except KeyboardInterrupt:
648 648 exit(1)
649 649
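# Hedged sketch, for illustration only (not part of this changeset). slide()
# above simply builds a Demo (or ClearDemo) and keeps calling it until every
# block has run, waiting on the delimiter between slides. The file name
# "talk_demo.py" is an assumption made up for the example:
def _example_slide_usage():  # pragma: no cover - illustrative only
    slide("talk_demo.py", noclear=True, format_rst=False, auto_all=True)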
650 650 if __name__ == '__main__':
651 651 import argparse
652 652 parser = argparse.ArgumentParser(description='Run python demos')
653 653 parser.add_argument('--noclear', '-C', action='store_true',
654 654 help='Do not clear terminal on each slide')
655 655 parser.add_argument('--rst', '-r', action='store_true',
656 656 help='Highlight comments and docstrings as rst')
657 657 parser.add_argument('--formatter', '-f', default='terminal',
658 658 help='pygments formatter name could be: terminal, '
659 659 'terminal256, terminal16m')
660 660 parser.add_argument('--style', '-s', default='default',
661 661 help='pygments style name')
662 662 parser.add_argument('--auto', '-a', action='store_true',
663 663 help='Run all blocks automatically without'
664 664 'confirmation')
665 665 parser.add_argument('--delimiter', '-d', default='...',
666 666 help='slides delimiter added after each slide run')
667 667 parser.add_argument('file', nargs=1,
668 668 help='python demo file')
669 669 args = parser.parse_args()
670 670 slide(args.file[0], noclear=args.noclear, format_rst=args.rst,
671 671 formatter=args.formatter, style=args.style, auto_all=args.auto,
672 672 delimiter=args.delimiter)
@@ -1,399 +1,399 b''
1 1 # -*- coding: utf-8 -*-
2 2 """Decorators for labeling test objects.
3 3
4 4 Decorators that merely return a modified version of the original function
5 5 object are straightforward. Decorators that return a new function object need
6 6 to use nose.tools.make_decorator(original_function)(decorator) in returning the
7 7 decorator, in order to preserve metadata such as function name, setup and
8 8 teardown functions and so on - see nose.tools for more information.
9 9
10 10 This module provides a set of useful decorators meant to be ready to use in
11 11 your own tests. See the bottom of the file for the ready-made ones, and if you
12 12 find yourself writing a new one that may be of generic use, add it here.
13 13
14 14 Included decorators:
15 15
16 16
17 17 Lightweight testing that remains unittest-compatible.
18 18
19 19 - An @as_unittest decorator can be used to tag any normal parameter-less
20 20 function as a unittest TestCase. Then, both nose and normal unittest will
21 21 recognize it as such. This will make it easier to migrate away from Nose if
22 22 we ever need/want to while maintaining very lightweight tests.
23 23
24 24 NOTE: This file contains IPython-specific decorators. Using the machinery in
25 25 IPython.external.decorators, we import numpy.testing.decorators if numpy is
26 26 available, or use equivalent code in IPython.external._decorators, which
27 27 we've copied verbatim from numpy.
28 28
29 29 """
30 30
31 31 # Copyright (c) IPython Development Team.
32 32 # Distributed under the terms of the Modified BSD License.
33 33
34 34 import os
35 35 import shutil
36 36 import sys
37 37 import tempfile
38 38 import unittest
39 39 import warnings
40 40 from importlib import import_module
41 41
42 42 from decorator import decorator
43 43
44 44 # Expose the unittest-driven decorators
45 45 from .ipunittest import ipdoctest, ipdocstring
46 46
47 47 # Grab the numpy-specific decorators which we keep in a file that we
48 48 # occasionally update from upstream: decorators.py is a copy of
49 49 # numpy.testing.decorators, we expose all of it here.
50 50 from IPython.external.decorators import knownfailureif
51 51
52 52 #-----------------------------------------------------------------------------
53 53 # Classes and functions
54 54 #-----------------------------------------------------------------------------
55 55
56 56 # Simple example of the basic idea
57 57 def as_unittest(func):
58 58 """Decorator to make a simple function into a normal test via unittest."""
59 59 class Tester(unittest.TestCase):
60 60 def test(self):
61 61 func()
62 62
63 63 Tester.__name__ = func.__name__
64 64
65 65 return Tester
66 66
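# Hedged usage sketch, for illustration only (not in the original file).
# as_unittest wraps a plain, argument-less function in a TestCase subclass so
# standard unittest discovery picks it up. The function below is made up:
@as_unittest
def _example_addition_check():  # pragma: no cover - illustrative only
    assert 1 + 1 == 2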
67 67 # Utility functions
68 68
69 69 def apply_wrapper(wrapper, func):
70 70 """Apply a wrapper to a function for decoration.
71 71
72 72 This mixes Michele Simionato's decorator tool with nose's make_decorator,
73 73 to apply a wrapper in a decorator so that all nose attributes, as well as
74 74 function signature and other properties, survive the decoration cleanly.
75 75 This will ensure that wrapped functions can still be well introspected via
76 76 IPython, for example.
77 77 """
78 78 warnings.warn("The function `apply_wrapper` is deprecated since IPython 4.0",
79 79 DeprecationWarning, stacklevel=2)
80 80 import nose.tools
81 81
82 82 return decorator(wrapper,nose.tools.make_decorator(func)(wrapper))
83 83
84 84
85 85 def make_label_dec(label, ds=None):
86 86 """Factory function to create a decorator that applies one or more labels.
87 87
88 88 Parameters
89 89 ----------
90 90 label : string or sequence
91 91 One or more labels that will be applied by the decorator to the functions
92 92 it decorates. Labels are attributes of the decorated function with their
93 93 value set to True.
94 94
95 95 ds : string
96 96 An optional docstring for the resulting decorator. If not given, a
97 97 default docstring is auto-generated.
98 98
99 99 Returns
100 100 -------
101 101 A decorator.
102 102
103 103 Examples
104 104 --------
105 105
106 106 A simple labeling decorator:
107 107
108 108 >>> slow = make_label_dec('slow')
109 109 >>> slow.__doc__
110 110 "Labels a test as 'slow'."
111 111
112 112 And one that uses multiple labels and a custom docstring:
113 113
114 114 >>> rare = make_label_dec(['slow','hard'],
115 115 ... "Mix labels 'slow' and 'hard' for rare tests.")
116 116 >>> rare.__doc__
117 117 "Mix labels 'slow' and 'hard' for rare tests."
118 118
119 119 Now, let's test using this one:
120 120 >>> @rare
121 121 ... def f(): pass
122 122 ...
123 123 >>>
124 124 >>> f.slow
125 125 True
126 126 >>> f.hard
127 127 True
128 128 """
129 129
130 130 warnings.warn("The function `make_label_dec` is deprecated since IPython 4.0",
131 131 DeprecationWarning, stacklevel=2)
132 132 if isinstance(label, str):
133 133 labels = [label]
134 134 else:
135 135 labels = label
136 136
137 137 # Validate that the given label(s) are OK for use in setattr() by doing a
138 138 # dry run on a dummy function.
139 139 tmp = lambda : None
140 140 for label in labels:
141 141 setattr(tmp,label,True)
142 142
143 143 # This is the actual decorator we'll return
144 144 def decor(f):
145 145 for label in labels:
146 146 setattr(f,label,True)
147 147 return f
148 148
149 149 # Apply the user's docstring, or autogenerate a basic one
150 150 if ds is None:
151 151 ds = "Labels a test as %r." % label
152 152 decor.__doc__ = ds
153 153
154 154 return decor
155 155
156 156
157 157 def skip_iptest_but_not_pytest(f):
158 158 """
159 Warnign this will make the test invisible to iptest.
159 Warning: this will make the test invisible to iptest.
160 160 """
161 161 import os
162 162
163 163 if os.environ.get("IPTEST_WORKING_DIR", None) is not None:
164 164 f.__test__ = False
165 165 return f
166 166
167 167
168 168 # Inspired by numpy's skipif, but uses the full apply_wrapper utility to
169 169 # preserve function metadata better and allows the skip condition to be a
170 170 # callable.
171 171 def skipif(skip_condition, msg=None):
172 172 ''' Make function raise SkipTest exception if skip_condition is true
173 173
174 174 Parameters
175 175 ----------
176 176
177 177 skip_condition : bool or callable
178 178 Flag to determine whether to skip test. If the condition is a
179 179 callable, it is used at runtime to dynamically make the decision. This
180 180 is useful for tests that may require costly imports, to delay the cost
181 181 until the test suite is actually executed.
182 182 msg : string
183 183 Message to give on raising a SkipTest exception.
184 184
185 185 Returns
186 186 -------
187 187 decorator : function
188 188 Decorator, which, when applied to a function, causes SkipTest
189 189 to be raised when the skip_condition was True, and the function
190 190 to be called normally otherwise.
191 191
192 192 Notes
193 193 -----
194 194 You will see from the code that we had to further decorate the
195 195 decorator with the nose.tools.make_decorator function in order to
196 196 transmit function name, and various other metadata.
197 197 '''
198 198
199 199 def skip_decorator(f):
200 200 # Local import to avoid a hard nose dependency and only incur the
201 201 # import time overhead at actual test-time.
202 202 import nose
203 203
204 204 # Allow for both boolean or callable skip conditions.
205 205 if callable(skip_condition):
206 206 skip_val = skip_condition
207 207 else:
208 208 skip_val = lambda : skip_condition
209 209
210 210 def get_msg(func,msg=None):
211 211 """Skip message with information about function being skipped."""
212 212 if msg is None: out = 'Test skipped due to test condition.'
213 213 else: out = msg
214 214 return "Skipping test: %s. %s" % (func.__name__,out)
215 215
216 216 # We need to define *two* skippers because Python doesn't allow both
217 217 # return with value and yield inside the same function.
218 218 def skipper_func(*args, **kwargs):
219 219 """Skipper for normal test functions."""
220 220 if skip_val():
221 221 raise nose.SkipTest(get_msg(f,msg))
222 222 else:
223 223 return f(*args, **kwargs)
224 224
225 225 def skipper_gen(*args, **kwargs):
226 226 """Skipper for test generators."""
227 227 if skip_val():
228 228 raise nose.SkipTest(get_msg(f,msg))
229 229 else:
230 230 for x in f(*args, **kwargs):
231 231 yield x
232 232
233 233 # Choose the right skipper to use when building the actual generator.
234 234 if nose.util.isgenerator(f):
235 235 skipper = skipper_gen
236 236 else:
237 237 skipper = skipper_func
238 238
239 239 return nose.tools.make_decorator(f)(skipper)
240 240
241 241 return skip_decorator
242 242
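# Hedged usage sketch, for illustration only (not part of this changeset).
# skipif() accepts a boolean or a zero-argument callable; with a callable the
# decision (and any costly import) is deferred to test run time. The module
# name "pandas" and the inner test are assumptions made up for the example:
def _example_skipif_usage():  # pragma: no cover - illustrative only
    @skipif(lambda: module_not_available("pandas"), "This test requires pandas")
    def check_pandas_import():
        import pandas
        assert pandas.__version__
    return check_pandas_import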
243 243 # A version with the condition set to true, common case just to attach a message
244 244 # to a skip decorator
245 245 def skip(msg=None):
246 246 """Decorator factory - mark a test function for skipping from test suite.
247 247
248 248 Parameters
249 249 ----------
250 250 msg : string
251 251 Optional message to be added.
252 252
253 253 Returns
254 254 -------
255 255 decorator : function
256 256 Decorator, which, when applied to a function, causes SkipTest
257 257 to be raised, with the optional message added.
258 258 """
259 259 if msg and not isinstance(msg, str):
260 260 raise ValueError('invalid object passed to `@skip` decorator, did you '
261 261 'mean `@skip()` with brackets?')
262 262 return skipif(True, msg)
263 263
264 264
265 265 def onlyif(condition, msg):
266 266 """The reverse from skipif, see skipif for details."""
267 267
268 268 if callable(condition):
269 269 skip_condition = lambda : not condition()
270 270 else:
271 271 skip_condition = lambda : not condition
272 272
273 273 return skipif(skip_condition, msg)
274 274
275 275 #-----------------------------------------------------------------------------
276 276 # Utility functions for decorators
277 277 def module_not_available(module):
278 278 """Can module be imported? Returns true if module does NOT import.
279 279
280 280 This is used to make a decorator to skip tests that require module to be
281 281 available, but delay the 'import numpy' to test execution time.
282 282 """
283 283 try:
284 284 mod = import_module(module)
285 285 mod_not_avail = False
286 286 except ImportError:
287 287 mod_not_avail = True
288 288
289 289 return mod_not_avail
290 290
291 291
292 292 def decorated_dummy(dec, name):
293 293 """Return a dummy function decorated with dec, with the given name.
294 294
295 295 Examples
296 296 --------
297 297 import IPython.testing.decorators as dec
298 298 setup = dec.decorated_dummy(dec.skip_if_no_x11, __name__)
299 299 """
300 300 warnings.warn("The function `decorated_dummy` is deprecated since IPython 4.0",
301 301 DeprecationWarning, stacklevel=2)
302 302 dummy = lambda: None
303 303 dummy.__name__ = name
304 304 return dec(dummy)
305 305
306 306 #-----------------------------------------------------------------------------
307 307 # Decorators for public use
308 308
309 309 # Decorators to skip certain tests on specific platforms.
310 310 skip_win32 = skipif(sys.platform == 'win32',
311 311 "This test does not run under Windows")
312 312 skip_linux = skipif(sys.platform.startswith('linux'),
313 313 "This test does not run under Linux")
314 314 skip_osx = skipif(sys.platform == 'darwin',"This test does not run under OS X")
315 315
316 316
317 317 # Decorators to skip tests if not on specific platforms.
318 318 skip_if_not_win32 = skipif(sys.platform != 'win32',
319 319 "This test only runs under Windows")
320 320 skip_if_not_linux = skipif(not sys.platform.startswith('linux'),
321 321 "This test only runs under Linux")
322 322 skip_if_not_osx = skipif(sys.platform != 'darwin',
323 323 "This test only runs under OSX")
324 324
325 325
326 326 _x11_skip_cond = (sys.platform not in ('darwin', 'win32') and
327 327 os.environ.get('DISPLAY', '') == '')
328 328 _x11_skip_msg = "Skipped under *nix when X11/XOrg not available"
329 329
330 330 skip_if_no_x11 = skipif(_x11_skip_cond, _x11_skip_msg)
331 331
332 332
333 333 # Decorators to skip certain tests on specific platform/python combinations
334 334 skip_win32_py38 = skipif(sys.version_info > (3,8) and os.name == 'nt')
335 335
336 336
337 337 # not a decorator itself, returns a dummy function to be used as setup
338 338 def skip_file_no_x11(name):
339 339 warnings.warn("The function `skip_file_no_x11` is deprecated since IPython 4.0",
340 340 DeprecationWarning, stacklevel=2)
341 341 return decorated_dummy(skip_if_no_x11, name) if _x11_skip_cond else None
342 342
343 343 # Other skip decorators
344 344
345 345 # generic skip without module
346 346 skip_without = lambda mod: skipif(module_not_available(mod), "This test requires %s" % mod)
347 347
348 348 skipif_not_numpy = skip_without('numpy')
349 349
350 350 skipif_not_matplotlib = skip_without('matplotlib')
351 351
352 352 skipif_not_sympy = skip_without('sympy')
353 353
354 354 skip_known_failure = knownfailureif(True,'This test is known to fail')
355 355
356 356 # A null 'decorator', useful to make more readable code that needs to pick
357 357 # between different decorators based on OS or other conditions
358 358 null_deco = lambda f: f
359 359
360 360 # Some tests only run where we can use unicode paths. Note that we can't just
361 361 # check os.path.supports_unicode_filenames, which is always False on Linux.
362 362 try:
363 363 f = tempfile.NamedTemporaryFile(prefix=u"tmp€")
364 364 except UnicodeEncodeError:
365 365 unicode_paths = False
366 366 else:
367 367 unicode_paths = True
368 368 f.close()
369 369
370 370 onlyif_unicode_paths = onlyif(unicode_paths, ("This test is only applicable "
371 371 "where we can use unicode in filenames."))
372 372
373 373
374 374 def onlyif_cmds_exist(*commands):
375 375 """
376 376 Decorator to skip test when at least one of `commands` is not found.
377 377 """
378 378 for cmd in commands:
379 379 reason = "This test runs only if command '{cmd}' is installed".format(cmd=cmd)
380 380 if not shutil.which(cmd):
381 381 if os.environ.get("IPTEST_WORKING_DIR", None) is not None:
382 382 return skip(reason)
383 383 else:
384 384 import pytest
385 385
386 386 return pytest.mark.skip(reason=reason)
387 387 return null_deco
388 388
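# Hedged usage sketch, for illustration only (not part of this changeset).
# onlyif_cmds_exist() returns either the identity decorator or a skip, so it
# can wrap a test directly. The command "git" and the body are assumptions:
def _example_onlyif_cmds_exist_usage():  # pragma: no cover - illustrative only
    @onlyif_cmds_exist("git")
    def check_git_runs():
        import subprocess
        subprocess.check_call(["git", "--version"], stdout=subprocess.DEVNULL)
    return check_git_runs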
389 389 def onlyif_any_cmd_exists(*commands):
390 390 """
391 391 Decorator to skip test unless at least one of `commands` is found.
392 392 """
393 393 warnings.warn("The function `onlyif_any_cmd_exists` is deprecated since IPython 4.0",
394 394 DeprecationWarning, stacklevel=2)
395 395 for cmd in commands:
396 396 if shutil.which(cmd):
397 397 return null_deco
398 398 return skip("This test runs only if one of the commands {0} "
399 399 "is installed".format(commands))
@@ -1,440 +1,440 b''
1 1 # encoding: utf-8
2 2 """
3 3 Utilities for path handling.
4 4 """
5 5
6 6 # Copyright (c) IPython Development Team.
7 7 # Distributed under the terms of the Modified BSD License.
8 8
9 9 import os
10 10 import sys
11 11 import errno
12 12 import shutil
13 13 import random
14 14 import glob
15 15 from warnings import warn
16 16
17 17 from IPython.utils.process import system
18 18 from IPython.utils.decorators import undoc
19 19
20 20 #-----------------------------------------------------------------------------
21 21 # Code
22 22 #-----------------------------------------------------------------------------
23 23 fs_encoding = sys.getfilesystemencoding()
24 24
25 25 def _writable_dir(path):
26 26 """Whether `path` is a directory, to which the user has write access."""
27 27 return os.path.isdir(path) and os.access(path, os.W_OK)
28 28
29 29 if sys.platform == 'win32':
30 30 def _get_long_path_name(path):
31 31 """Get a long path name (expand ~) on Windows using ctypes.
32 32
33 33 Examples
34 34 --------
35 35
36 36 >>> get_long_path_name('c:\\docume~1')
37 37 'c:\\\\Documents and Settings'
38 38
39 39 """
40 40 try:
41 41 import ctypes
42 42 except ImportError as e:
43 43 raise ImportError('you need to have ctypes installed for this to work') from e
44 44 _GetLongPathName = ctypes.windll.kernel32.GetLongPathNameW
45 45 _GetLongPathName.argtypes = [ctypes.c_wchar_p, ctypes.c_wchar_p,
46 46 ctypes.c_uint ]
47 47
48 48 buf = ctypes.create_unicode_buffer(260)
49 49 rv = _GetLongPathName(path, buf, 260)
50 50 if rv == 0 or rv > 260:
51 51 return path
52 52 else:
53 53 return buf.value
54 54 else:
55 55 def _get_long_path_name(path):
56 56 """Dummy no-op."""
57 57 return path
58 58
59 59
60 60
61 61 def get_long_path_name(path):
62 62 """Expand a path into its long form.
63 63
64 64 On Windows this expands any ~ in the paths. On other platforms, it is
65 65 a null operation.
66 66 """
67 67 return _get_long_path_name(path)
68 68
69 69
70 70 def unquote_filename(name, win32=(sys.platform=='win32')):
71 71 """ On Windows, remove leading and trailing quotes from filenames.
72 72
73 73 This function has been deprecated and should not be used any more:
74 74 unquoting is now taken care of by :func:`IPython.utils.process.arg_split`.
75 75 """
76 76 warn("'unquote_filename' is deprecated since IPython 5.0 and should not "
77 77 "be used anymore", DeprecationWarning, stacklevel=2)
78 78 if win32:
79 79 if name.startswith(("'", '"')) and name.endswith(("'", '"')):
80 80 name = name[1:-1]
81 81 return name
82 82
83 83
84 84 def compress_user(path):
85 85 """Reverse of :func:`os.path.expanduser`
86 86 """
87 87 home = os.path.expanduser('~')
88 88 if path.startswith(home):
89 89 path = "~" + path[len(home):]
90 90 return path
91 91
92 92 def get_py_filename(name, force_win32=None):
93 93 """Return a valid python filename in the current directory.
94 94
95 95 If the given name is not a file, it adds '.py' and searches again.
96 96 Raises IOError with an informative message if the file isn't found.
97 97 """
98 98
99 99 name = os.path.expanduser(name)
100 100 if force_win32 is not None:
101 101 warn("The 'force_win32' argument to 'get_py_filename' is deprecated "
102 102 "since IPython 5.0 and should not be used anymore",
103 103 DeprecationWarning, stacklevel=2)
104 104 if not os.path.isfile(name) and not name.endswith('.py'):
105 105 name += '.py'
106 106 if os.path.isfile(name):
107 107 return name
108 108 else:
109 109 raise IOError('File `%r` not found.' % name)
110 110
111 111
112 112 def filefind(filename: str, path_dirs=None) -> str:
113 113 """Find a file by looking through a sequence of paths.
114 114
115 115 This iterates through a sequence of paths looking for a file and returns
116 116 the full, absolute path of the first occurrence of the file. If no set of
117 117 path dirs is given, the filename is tested as is, after running through
118 118 :func:`expandvars` and :func:`expanduser`. Thus a simple call::
119 119
120 120 filefind('myfile.txt')
121 121
122 122 will find the file in the current working dir, but::
123 123
124 124 filefind('~/myfile.txt')
125 125
126 126 Will find the file in the user's home directory. This function does not
127 127 automatically try any paths, such as the cwd or the user's home directory.
128 128
129 129 Parameters
130 130 ----------
131 131 filename : str
132 132 The filename to look for.
133 133 path_dirs : str, None or sequence of str
134 134 The sequence of paths to look for the file in. If None, the filename
135 135 need to be absolute or be in the cwd. If a string, the string is
136 136 put into a sequence and the searched. If a sequence, walk through
137 137 each element and join with ``filename``, calling :func:`expandvars`
138 138 and :func:`expanduser` before testing for existence.
139 139
140 140 Returns
141 141 -------
142 142 path : str
143 143 returns absolute path to file.
144 144
145 145 Raises
146 146 ------
147 147 IOError
148 148 """
149 149
150 150 # If paths are quoted, abspath gets confused, strip them...
151 151 filename = filename.strip('"').strip("'")
152 152 # If the input is an absolute path, just check it exists
153 153 if os.path.isabs(filename) and os.path.isfile(filename):
154 154 return filename
155 155
156 156 if path_dirs is None:
157 157 path_dirs = ("",)
158 158 elif isinstance(path_dirs, str):
159 159 path_dirs = (path_dirs,)
160 160
161 161 for path in path_dirs:
162 162 if path == '.': path = os.getcwd()
163 163 testname = expand_path(os.path.join(path, filename))
164 164 if os.path.isfile(testname):
165 165 return os.path.abspath(testname)
166 166
167 167 raise IOError("File %r does not exist in any of the search paths: %r" %
168 168 (filename, path_dirs) )
169 169
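# Hedged usage sketch, for illustration only (not part of this changeset).
# filefind() tries each entry of path_dirs in order and returns the first
# absolute match, raising IOError otherwise. The names below are made up:
def _example_filefind_usage():  # pragma: no cover - illustrative only
    try:
        return filefind("ipython_config.py", [".", "~/.ipython/profile_default"])
    except IOError:
        return None  # not found in either search location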
170 170
171 171 class HomeDirError(Exception):
172 172 pass
173 173
174 174
175 175 def get_home_dir(require_writable=False) -> str:
176 176 """Return the 'home' directory, as a unicode string.
177 177
178 178 Uses os.path.expanduser('~'), and checks for writability.
179 179
180 180 See stdlib docs for how this is determined.
181 181 For Python <3.8, $HOME is first priority on *ALL* platforms.
182 182 For Python >=3.8 on Windows, %HOME% is no longer considered.
183 183
184 184 Parameters
185 185 ----------
186 186 require_writable : bool [default: False]
187 187 if True:
188 188 guarantees the return value is a writable directory, otherwise
189 189 raises HomeDirError
190 190 if False:
191 191 The path is resolved, but it is not guaranteed to exist or be writable.
192 192 """
193 193
194 194 homedir = os.path.expanduser('~')
195 195 # Next line will make things work even when /home/ is a symlink to
196 196 # /usr/home as it is on FreeBSD, for example
197 197 homedir = os.path.realpath(homedir)
198 198
199 199 if not _writable_dir(homedir) and os.name == 'nt':
200 200 # expanduser failed, use the registry to get the 'My Documents' folder.
201 201 try:
202 202 import winreg as wreg
203 203 with wreg.OpenKey(
204 204 wreg.HKEY_CURRENT_USER,
205 205 r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
206 206 ) as key:
207 207 homedir = wreg.QueryValueEx(key,'Personal')[0]
208 208 except:
209 209 pass
210 210
211 211 if (not require_writable) or _writable_dir(homedir):
212 assert isinstance(homedir, str), "Homedir shoudl be unicode not bytes"
212 assert isinstance(homedir, str), "Homedir should be unicode not bytes"
213 213 return homedir
214 214 else:
215 215 raise HomeDirError('%s is not a writable dir, '
216 216 'set $HOME environment variable to override' % homedir)
217 217
218 218 def get_xdg_dir():
219 219 """Return the XDG_CONFIG_HOME, if it is defined and exists, else None.
220 220
221 221 This is only for non-OS X posix (Linux,Unix,etc.) systems.
222 222 """
223 223
224 224 env = os.environ
225 225
226 226 if os.name == 'posix' and sys.platform != 'darwin':
227 227 # Linux, Unix, AIX, etc.
228 228 # use ~/.config if empty OR not set
229 229 xdg = env.get("XDG_CONFIG_HOME", None) or os.path.join(get_home_dir(), '.config')
230 230 if xdg and _writable_dir(xdg):
231 231 assert isinstance(xdg, str)
232 232 return xdg
233 233
234 234 return None
235 235
236 236
237 237 def get_xdg_cache_dir():
238 238 """Return the XDG_CACHE_HOME, if it is defined and exists, else None.
239 239
240 240 This is only for non-OS X posix (Linux,Unix,etc.) systems.
241 241 """
242 242
243 243 env = os.environ
244 244
245 245 if os.name == 'posix' and sys.platform != 'darwin':
246 246 # Linux, Unix, AIX, etc.
247 247 # use ~/.cache if empty OR not set
248 248 xdg = env.get("XDG_CACHE_HOME", None) or os.path.join(get_home_dir(), '.cache')
249 249 if xdg and _writable_dir(xdg):
250 250 assert isinstance(xdg, str)
251 251 return xdg
252 252
253 253 return None
254 254
255 255
256 256 @undoc
257 257 def get_ipython_dir():
258 258 warn("get_ipython_dir has moved to the IPython.paths module since IPython 4.0.", DeprecationWarning, stacklevel=2)
259 259 from IPython.paths import get_ipython_dir
260 260 return get_ipython_dir()
261 261
262 262 @undoc
263 263 def get_ipython_cache_dir():
264 264 warn("get_ipython_cache_dir has moved to the IPython.paths module since IPython 4.0.", DeprecationWarning, stacklevel=2)
265 265 from IPython.paths import get_ipython_cache_dir
266 266 return get_ipython_cache_dir()
267 267
268 268 @undoc
269 269 def get_ipython_package_dir():
270 270 warn("get_ipython_package_dir has moved to the IPython.paths module since IPython 4.0.", DeprecationWarning, stacklevel=2)
271 271 from IPython.paths import get_ipython_package_dir
272 272 return get_ipython_package_dir()
273 273
274 274 @undoc
275 275 def get_ipython_module_path(module_str):
276 276 warn("get_ipython_module_path has moved to the IPython.paths module since IPython 4.0.", DeprecationWarning, stacklevel=2)
277 277 from IPython.paths import get_ipython_module_path
278 278 return get_ipython_module_path(module_str)
279 279
280 280 @undoc
281 281 def locate_profile(profile='default'):
282 282 warn("locate_profile has moved to the IPython.paths module since IPython 4.0.", DeprecationWarning, stacklevel=2)
283 283 from IPython.paths import locate_profile
284 284 return locate_profile(profile=profile)
285 285
286 286 def expand_path(s):
287 287 """Expand $VARS and ~names in a string, like a shell
288 288
289 289 :Examples:
290 290
291 291 In [2]: os.environ['FOO']='test'
292 292
293 293 In [3]: expand_path('variable FOO is $FOO')
294 294 Out[3]: 'variable FOO is test'
295 295 """
296 296 # This is a pretty subtle hack. When expanduser is given a UNC path
297 297 # on Windows (\\server\share$\%username%), os.path.expandvars removes
298 298 # the $ to get (\\server\share\%username%). I think it considered $
299 299 # alone an empty var. But we need the $ to remain there (it indicates
300 300 # a hidden share).
301 301 if os.name=='nt':
302 302 s = s.replace('$\\', 'IPYTHON_TEMP')
303 303 s = os.path.expandvars(os.path.expanduser(s))
304 304 if os.name=='nt':
305 305 s = s.replace('IPYTHON_TEMP', '$\\')
306 306 return s
307 307
308 308
309 309 def unescape_glob(string):
310 310 """Unescape glob pattern in `string`."""
311 311 def unescape(s):
312 312 for pattern in '*[]!?':
313 313 s = s.replace(r'\{0}'.format(pattern), pattern)
314 314 return s
315 315 return '\\'.join(map(unescape, string.split('\\\\')))
316 316
317 317
318 318 def shellglob(args):
319 319 """
320 320 Do glob expansion for each element in `args` and return a flattened list.
321 321
322 322 Unmatched glob pattern will remain as-is in the returned list.
323 323
324 324 """
325 325 expanded = []
326 326 # Do not unescape backslash in Windows as it is interpreted as
327 327 # path separator:
328 328 unescape = unescape_glob if sys.platform != 'win32' else lambda x: x
329 329 for a in args:
330 330 expanded.extend(glob.glob(a) or [unescape(a)])
331 331 return expanded
332 332
333 333
334 334 def target_outdated(target,deps):
335 335 """Determine whether a target is out of date.
336 336
337 337 target_outdated(target,deps) -> 1/0
338 338
339 339 deps: list of filenames which MUST exist.
340 340 target: single filename which may or may not exist.
341 341
342 342 If target doesn't exist or is older than any file listed in deps, return
343 343 true, otherwise return false.
344 344 """
345 345 try:
346 346 target_time = os.path.getmtime(target)
347 347 except os.error:
348 348 return 1
349 349 for dep in deps:
350 350 dep_time = os.path.getmtime(dep)
351 351 if dep_time > target_time:
352 352 #print "For target",target,"Dep failed:",dep # dbg
353 353 #print "times (dep,tar):",dep_time,target_time # dbg
354 354 return 1
355 355 return 0
356 356
357 357
358 358 def target_update(target,deps,cmd):
359 359 """Update a target with a given command given a list of dependencies.
360 360
361 361 target_update(target,deps,cmd) -> runs cmd if target is outdated.
362 362
363 363 This is just a wrapper around target_outdated() which calls the given
364 364 command if target is outdated."""
365 365
366 366 if target_outdated(target,deps):
367 367 system(cmd)
368 368
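# Hedged usage sketch, for illustration only (not part of this changeset).
# target_update() is a tiny make-like rule: the command runs only when the
# target is missing or older than a dependency. The file names and the
# command below are assumptions made up for the example:
def _example_target_update_usage():  # pragma: no cover - illustrative only
    target_update("api.html",                       # target, may not exist yet
                  ["api.rst"],                      # dependencies (must exist)
                  "sphinx-build -b html . _build")  # run only if outdated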
369 369
370 370 ENOLINK = 1998
371 371
372 372 def link(src, dst):
373 373 """Hard links ``src`` to ``dst``, returning 0 or errno.
374 374
375 375 Note that the special errno ``ENOLINK`` will be returned if ``os.link`` isn't
376 376 supported by the operating system.
377 377 """
378 378
379 379 if not hasattr(os, "link"):
380 380 return ENOLINK
381 381 link_errno = 0
382 382 try:
383 383 os.link(src, dst)
384 384 except OSError as e:
385 385 link_errno = e.errno
386 386 return link_errno
387 387
388 388
389 389 def link_or_copy(src, dst):
390 390 """Attempts to hardlink ``src`` to ``dst``, copying if the link fails.
391 391
392 392 Attempts to maintain the semantics of ``shutil.copy``.
393 393
394 394 Because ``os.link`` does not overwrite files, a unique temporary file
395 395 will be used if the target already exists, then that file will be moved
396 396 into place.
397 397 """
398 398
399 399 if os.path.isdir(dst):
400 400 dst = os.path.join(dst, os.path.basename(src))
401 401
402 402 link_errno = link(src, dst)
403 403 if link_errno == errno.EEXIST:
404 404 if os.stat(src).st_ino == os.stat(dst).st_ino:
405 405 # dst is already a hard link to the correct file, so we don't need
406 406 # to do anything else. If we try to link and rename the file
407 407 # anyway, we get duplicate files - see http://bugs.python.org/issue21876
408 408 return
409 409
410 410 new_dst = dst + "-temp-%04X" %(random.randint(1, 16**4), )
411 411 try:
412 412 link_or_copy(src, new_dst)
413 413 except:
414 414 try:
415 415 os.remove(new_dst)
416 416 except OSError:
417 417 pass
418 418 raise
419 419 os.rename(new_dst, dst)
420 420 elif link_errno != 0:
421 421 # Either link isn't supported, or the filesystem doesn't support
422 422 # linking, or 'src' and 'dst' are on different filesystems.
423 423 shutil.copy(src, dst)
424 424
425 425 def ensure_dir_exists(path, mode=0o755):
426 426 """ensure that a directory exists
427 427
428 428 If it doesn't exist, try to create it and protect against a race condition
429 429 if another process is doing the same.
430 430
431 431 The default permissions are 755, which differ from os.makedirs default of 777.
432 432 """
433 433 if not os.path.exists(path):
434 434 try:
435 435 os.makedirs(path, mode=mode)
436 436 except OSError as e:
437 437 if e.errno != errno.EEXIST:
438 438 raise
439 439 elif not os.path.isdir(path):
440 440 raise IOError("%r exists but is not a directory" % path)
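# Hedged usage sketch, for illustration only (not part of this changeset).
# ensure_dir_exists() is race-safe, so it pairs naturally with link_or_copy()
# when staging files. The paths below are assumptions made up for the example:
def _example_staging_usage():  # pragma: no cover - illustrative only
    import tempfile

    staging = os.path.join(tempfile.gettempdir(), "ipython-staging")
    ensure_dir_exists(staging)           # create with mode 0o755 if missing
    link_or_copy("README.rst", staging)  # hardlink into place, copy on failure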
@@ -1,39 +1,39 b''
1 1 #!/usr/bin/env python
2 2 """Simple GTK example to manually test event loop integration.
3 3
4 4 This is meant to run tests manually in ipython as:
5 5
6 6 In [5]: %gui gtk
7 7
8 8 In [6]: %run gui-gtk.py
9 9 """
10 10
11 11 import pygtk
12 12 pygtk.require('2.0')
13 13 import gtk
14 14
15 15
16 def hello_world(wigdet, data=None):
16 def hello_world(widget, data=None):
17 17 print("Hello World")
18 18
19 19 def delete_event(widget, event, data=None):
20 20 return False
21 21
22 22 def destroy(widget, data=None):
23 23 gtk.main_quit()
24 24
25 25 window = gtk.Window(gtk.WINDOW_TOPLEVEL)
26 26 window.connect("delete_event", delete_event)
27 27 window.connect("destroy", destroy)
28 28 button = gtk.Button("Hello World")
29 29 button.connect("clicked", hello_world, None)
30 30
31 31 window.add(button)
32 32 button.show()
33 33 window.show()
34 34
35 35 try:
36 36 from IPython.lib.inputhook import enable_gui
37 37 enable_gui('gtk')
38 38 except ImportError:
39 39 gtk.main()
@@ -1,37 +1,37 b''
1 1 #!/usr/bin/env python
2 2 """Simple Gtk example to manually test event loop integration.
3 3
4 4 This is meant to run tests manually in ipython as:
5 5
6 6 In [1]: %gui gtk3
7 7
8 8 In [2]: %run gui-gtk3.py
9 9 """
10 10
11 11 from gi.repository import Gtk
12 12
13 13
14 def hello_world(wigdet, data=None):
14 def hello_world(widget, data=None):
15 15 print("Hello World")
16 16
17 17 def delete_event(widget, event, data=None):
18 18 return False
19 19
20 20 def destroy(widget, data=None):
21 21 Gtk.main_quit()
22 22
23 23 window = Gtk.Window(type=Gtk.WindowType.TOPLEVEL)
24 24 window.connect("delete_event", delete_event)
25 25 window.connect("destroy", destroy)
26 26 button = Gtk.Button(label="Hello World")
27 27 button.connect("clicked", hello_world, None)
28 28
29 29 window.add(button)
30 30 button.show()
31 31 window.show()
32 32
33 33 try:
34 34 from IPython.lib.inputhook import enable_gui
35 35 enable_gui('gtk3')
36 36 except ImportError:
37 37 Gtk.main()
@@ -1,37 +1,37 b''
1 1 #!/usr/bin/env python
2 2 """Simple Gtk example to manually test event loop integration.
3 3
4 4 This is meant to run tests manually in ipython as:
5 5
6 6 In [1]: %gui gtk4
7 7
8 8 In [2]: %run gui-gtk4.py
9 9 """
10 10
11 11 import gi
12 12
13 13 gi.require_version("Gtk", "4.0")
14 14 from gi.repository import Gtk, GLib # noqa
15 15
16 16
17 def hello_world(wigdet, data=None):
17 def hello_world(widget, data=None):
18 18 print("Hello World")
19 19
20 20
21 21 def close_request_cb(widget, data=None):
22 22 global running
23 23 running = False
24 24
25 25
26 26 running = True
27 27 window = Gtk.Window()
28 28 window.connect("close-request", close_request_cb)
29 29 button = Gtk.Button(label="Hello World")
30 30 button.connect("clicked", hello_world, None)
31 31
32 32 window.set_child(button)
33 33 window.show()
34 34
35 35 context = GLib.MainContext.default()
36 36 while running:
37 37 context.iteration(True)
@@ -1,239 +1,239 b''
1 1 # Simple tool to help with releases
2 2 # when releasing with bash, simply source it to get asked questions.
3 3
4 4 # misc check before starting
5 5
6 6 python -c 'import keyring'
7 7 python -c 'import twine'
8 8 python -c 'import sphinx'
9 9 python -c 'import sphinx_rtd_theme'
10 10 python -c 'import nose'
11 11
12 12
13 13 BLACK=$(tput setaf 0)
14 14 RED=$(tput setaf 1)
15 15 GREEN=$(tput setaf 2)
16 16 YELLOW=$(tput setaf 3)
17 17 BLUE=$(tput setaf 4)
18 18 MAGENTA=$(tput setaf 5)
19 19 CYAN=$(tput setaf 6)
20 20 WHITE=$(tput setaf 7)
21 21 NOR=$(tput sgr0)
22 22
23 23
24 24 echo "Will use $EDITOR to edit files when necessary"
25 25 echo -n "PREV_RELEASE (X.y.z) [$PREV_RELEASE]: "
26 26 read input
27 27 PREV_RELEASE=${input:-$PREV_RELEASE}
28 28 echo -n "MILESTONE (X.y) [$MILESTONE]: "
29 29 read input
30 30 MILESTONE=${input:-$MILESTONE}
31 31 echo -n "VERSION (X.y.z) [$VERSION]:"
32 32 read input
33 33 VERSION=${input:-$VERSION}
34 34 echo -n "BRANCH (master|X.y) [$BRANCH]:"
35 35 read input
36 36 BRANCH=${input:-$BRANCH}
37 37
38 38 ask_section(){
39 39 echo
40 40 echo $BLUE"$1"$NOR
41 41 echo -n $GREEN"Press Enter to continue, S to skip: "$NOR
42 42 read -n1 value
43 43 echo
44 44 if [ -z $value ] || [ $value = 'y' ] ; then
45 45 return 0
46 46 fi
47 47 return 1
48 48 }
49 49
50 50
51 51 maybe_edit(){
52 52 echo
53 53 echo $BLUE"$1"$NOR
54 54 echo -n $GREEN"Press e to Edit $1, any other keys to skip: "$NOR
55 55 read -n1 value
56 56 echo
57 57 if [ $value = 'e' ] ; then
58 58 $EDITOR $1
59 59 fi
60 60 }
61 61
62 62
63 63
64 64 echo
65 if ask_section "Updating what's new with informations from docs/source/whatsnew/pr"
65 if ask_section "Updating what's new with information from docs/source/whatsnew/pr"
66 66 then
67 67 python tools/update_whatsnew.py
68 68
69 69 echo
70 70 echo $BLUE"please move the contents of "docs/source/whatsnew/development.rst" to version-X.rst"$NOR
71 71 echo $GREEN"Press enter to continue"$NOR
72 72 read
73 73 fi
74 74
75 75 if ask_section "Gen Stats, and authors"
76 76 then
77 77
78 78 echo
79 79 echo $BLUE"here are all the authors that contributed to this release:"$NOR
80 80 git log --format="%aN <%aE>" $PREV_RELEASE... | sort -u -f
81 81
82 82 echo
83 83 echo $BLUE"If you see any duplicates cancel (Ctrl-C), then edit .mailmap."
84 84 echo $GREEN"Press enter to continue:"$NOR
85 85 read
86 86
87 87 echo $BLUE"generating stats"$NOR
88 88 python tools/github_stats.py --milestone $MILESTONE > stats.rst
89 89
90 90 echo $BLUE"stats.rst files generated."$NOR
91 91 echo $GREEN"Please merge it with the right file (github-stats-X.rst) and commit."$NOR
92 92 echo $GREEN"press enter to continue."$NOR
93 93 read
94 94
95 95 fi
96 96
97 97 if ask_section "Generate API difference (using frapuccino)"
98 98 then
99 99 echo $BLUE"Checking out $PREV_RELEASE"$NOR
100 100 git checkout $PREV_RELEASE
101 101 echo $BLUE"Saving API to file $PREV_RELEASE"$NOR
102 102 frappuccino IPython --save IPython-$PREV_RELEASE.json
103 echo $BLUE"comming back to $BRANCH"$NOR
103 echo $BLUE"coming back to $BRANCH"$NOR
104 104 git checkout $BRANCH
105 105 echo $BLUE"comparing ..."$NOR
106 106 frappuccino IPython --compare IPython-$PREV_RELEASE.json
107 107 echo $GREEN"Use the above guideline to write an API changelog ..."$NOR
108 108 echo $GREEN"Press any keys to continue"$NOR
109 109 read
110 110 fi
111 111
112 112 echo "Cleaning repository"
113 113 git clean -xfdi
114 114
115 115 echo $GREEN"please update version number in ${RED}IPython/core/release.py${NOR}, do not commit yet – we'll do it later."$NOR
116 116 echo $GREEN"I tried ${RED}sed -i bkp -e '/Uncomment/s/^# //g' IPython/core/release.py${NOR}"
117 117 sed -i bkp -e '/Uncomment/s/^# //g' IPython/core/release.py
118 118 rm IPython/core/release.pybkp
119 119 git diff
120 120 maybe_edit IPython/core/release.py
121 121
122 122 echo $GREEN"Press enter to continue"$NOR
123 123 read
124 124
125 125 if ask_section "Build the documentation ?"
126 126 then
127 127 make html -C docs
128 128 echo
129 129 echo $GREEN"Check the docs, press enter to continue"$NOR
130 130 read
131 131
132 132 fi
133 133
134 134 if ask_section "Should we commit, tag, push... etc ? "
135 135 then
136 136 echo
137 137 echo $BLUE"Let's commit : git commit -am \"release $VERSION\" -S"
138 138 echo $GREEN"Press enter to commit"$NOR
139 139 read
140 140 git commit -am "release $VERSION" -S
141 141
142 142 echo
143 143 echo $BLUE"git push origin \$BRANCH ($BRANCH)?"$NOR
144 144 echo $GREEN"Make sure you can push"$NOR
145 145 echo $GREEN"Press enter to continue"$NOR
146 146 read
147 147 git push origin $BRANCH
148 148
149 149 echo
150 150 echo "Let's tag : git tag -am \"release $VERSION\" \"$VERSION\" -s"
151 151 echo $GREEN"Press enter to tag commit"$NOR
152 152 read
153 153 git tag -am "release $VERSION" "$VERSION" -s
154 154
155 155 echo
156 156 echo $BLUE"And push the tag: git push origin \$VERSION ?"$NOR
157 157 echo $GREEN"Press enter to continue"$NOR
158 158 read
159 159 git push origin $VERSION
160 160
161 161
162 162 echo $GREEN"please update version number and back to .dev in ${RED}IPython/core/release.py"
163 163 echo $GREEN"I tried ${RED}sed -i bkp -e '/Uncomment/s/^/# /g' IPython/core/release.py${NOR}"
164 164 sed -i bkp -e '/Uncomment/s/^/# /g' IPython/core/release.py
165 165 rm IPython/core/release.pybkp
166 166 git diff
167 167 echo $GREEN"Please bump ${RED}the minor version number${NOR}"
168 168 maybe_edit IPython/core/release.py
169 169 echo ${BLUE}"Do not commit yet – we'll do it later."$NOR
170 170
171 171
172 172 echo $GREEN"Press enter to continue"$NOR
173 173 read
174 174
175 175 echo
176 176 echo "Let's commit : "$BLUE"git commit -am \"back to dev\""$NOR
177 177 echo $GREEN"Press enter to commit"$NOR
178 178 read
179 179 git commit -am "back to dev"
180 180
181 181 echo
182 182 echo $BLUE"git push origin \$BRANCH ($BRANCH)?"$NOR
183 183 echo $GREEN"Press enter to continue"$NOR
184 184 read
185 185 git push origin $BRANCH
186 186
187 187
188 188 echo
189 189 echo $BLUE"let's : git checkout $VERSION"$NOR
190 190 echo $GREEN"Press enter to continue"$NOR
191 191 read
192 192 git checkout $VERSION
193 193 fi
194 194
195 195 if ask_section "Should we build and release ?"
196 196 then
197 197
198 198 echo $BLUE"going to set SOURCE_DATE_EPOCH"$NOR
199 199 echo $BLUE'export SOURCE_DATE_EPOCH=$(git show -s --format=%ct HEAD)'$NOR
200 200 echo $GREEN"Press enter to continue"$NOR
201 201 read
202 202
203 203 export SOURCE_DATE_EPOCH=$(git show -s --format=%ct HEAD)
204 204
205 205 echo $BLUE"SOURCE_DATE_EPOCH set to $SOURCE_DATE_EPOCH"$NOR
206 206 echo $GREEN"Press enter to continue"$NOR
207 207 read
208 208
209 209
210 210
211 211 echo
212 212 echo $BLUE"Attempting to build package..."$NOR
213 213
214 214 tools/release
215 215
216 216
217 217 echo $RED'$ shasum -a 256 dist/*'
218 218 shasum -a 256 dist/*
219 219 echo $NOR
220 220
221 221 echo $BLUE"We are going to rebuild; note the hashes above, and compare them to the rebuild"$NOR
222 222 echo $GREEN"Press enter to continue"$NOR
223 223 read
224 224
225 225 echo
226 226 echo $BLUE"Attempting to build package..."$NOR
227 227
228 228 tools/release
229 229
230 230 echo $RED"Check the shasum for SOURCE_DATE_EPOCH=$SOURCE_DATE_EPOCH"
231 231 echo $RED'$ shasum -a 256 dist/*'
232 232 shasum -a 256 dist/*
233 233 echo $NOR
234 234
235 235 if ask_section "upload packages ?"
236 236 then
237 237 tools/release upload
238 238 fi
239 239 fi
@@ -1,68 +1,68 b''
1 1 """
2 2 Un-targz and retargz a targz file to ensure reproducible build.
3 3
4 4 usage:
5 5
6 6 $ export SOURCE_DATE_EPOCH=$(date +%s)
7 7 ...
8 8 $ python retar.py <tarfile.gz>
9 9
10 10 The process of creating an sdist can be non-reproducible:
11 11 - directories created during the process get an mtime of the creation date;
12 - gziping files embed the timestamp of fo zip creation.
12 - gzipping files embed the timestamp of zip creation.
13 13
14 14 This will untar-retar; ensuring that all mtime > SOURCE_DATE_EPOCH will be set
15 15 equal to SOURCE_DATE_EPOCH.
16 16
17 17 """
18 18
19 19 import tarfile
20 20 import sys
21 21 import os
22 22 import gzip
23 23 import io
24 24
25 25 from pathlib import Path
26 26
27 27 if len(sys.argv) > 2:
28 28 raise ValueError("Too many arguments")
29 29
30 30
31 31 timestamp = int(os.environ["SOURCE_DATE_EPOCH"])
32 32
33 33 path = Path(sys.argv[1])
34 34 old_buf = io.BytesIO()
35 35 with open(path, "rb") as f:
36 36 old_buf.write(f.read())
37 37 old_buf.seek(0)
38 38 old = tarfile.open(fileobj=old_buf, mode="r:gz")
39 39
40 40 buf = io.BytesIO()
41 41 new = tarfile.open(fileobj=buf, mode="w", format=tarfile.GNU_FORMAT)
42 42 for i, m in enumerate(old):
43 43 data = None
44 44 # mutation does not work, copy
45 45 if m.name.endswith('.DS_Store'):
46 46 continue
47 47 m2 = tarfile.TarInfo(m.name)
48 48 m2.mtime = min(timestamp, m.mtime)
49 49 m2.size = m.size
50 50 m2.type = m.type
51 51 m2.linkname = m.linkname
52 52 m2.mode = m.mode
53 53 if m.isdir():
54 54 new.addfile(m2)
55 55 else:
56 56 data = old.extractfile(m)
57 57 new.addfile(m2, data)
58 58 new.close()
59 59 old.close()
60 60
61 61 buf.seek(0)
62 62 with open(path, "wb") as f:
63 63 with gzip.GzipFile('', "wb", fileobj=f, mtime=timestamp) as gzf:
64 64 gzf.write(buf.read())
65 65
66 66 # checks the archive is valid.
67 67 archive = tarfile.open(path)
68 68 names = archive.getnames()
1 NO CONTENT: modified file
The requested commit or file is too big and content was truncated.